path: root/kernel/sched/core.c
author    Peter Zijlstra <peterz@infradead.org>    2022-09-06 12:39:55 +0200
committer Peter Zijlstra <peterz@infradead.org>    2022-09-07 21:53:49 +0200
commit f9fc8cad9728124cefe8844fb53d1814c92c6bfc (patch)
tree   1d8a1b332af2b0f921951310852284b8e556f162 /kernel/sched/core.c
parent sched: Change wait_task_inactive()s match_state (diff)
sched: Add TASK_ANY for wait_task_inactive()
Now that wait_task_inactive()'s @match_state argument is a mask (like ttwu()) it is possible to replace the special !match_state case with an 'all-states' value such that any blocked state will match.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/YxhkzfuFTvRnpUaH@hirez.programming.kicks-ass.net
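The mask itself is introduced outside this file; a sketch of the include/linux/sched.h side of the series, assuming the usual layout where each task state is a single bit strictly below TASK_STATE_MAX:

/*
 * Sketch, not the verbatim hunk: with the state flags being single bits
 * strictly below TASK_STATE_MAX, an all-bits mask below that limit
 * matches any blocked state.
 */
#define TASK_ANY	(TASK_STATE_MAX-1)

Callers that previously passed 0 to mean "any state" pass TASK_ANY instead, which is what lets the two !match_state special cases below go away.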
Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 43d71c696125..ea26526b45bb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3253,12 +3253,12 @@ out:
/*
* wait_task_inactive - wait for a thread to unschedule.
*
- * If @match_state is nonzero, it's the @p->state value just checked and
- * not expected to change. If it changes, i.e. @p might have woken up,
- * then return zero. When we succeed in waiting for @p to be off its CPU,
- * we return a positive number (its total switch count). If a second call
- * a short while later returns the same number, the caller can be sure that
- * @p has remained unscheduled the whole time.
+ * Wait for the thread to block in any of the states set in @match_state.
+ * If it changes, i.e. @p might have woken up, then return zero. When we
+ * succeed in waiting for @p to be off its CPU, we return a positive number
+ * (its total switch count). If a second call a short while later returns the
+ * same number, the caller can be sure that @p has remained unscheduled the
+ * whole time.
*
* The caller must ensure that the task *will* unschedule sometime soon,
* else this function might spin for a *long* time. This function can't
@@ -3294,7 +3294,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
* is actually now running somewhere else!
*/
while (task_on_cpu(rq, p)) {
- if (match_state && !(READ_ONCE(p->__state) & match_state))
+ if (!(READ_ONCE(p->__state) & match_state))
return 0;
cpu_relax();
}
@@ -3309,7 +3309,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
running = task_on_cpu(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
- if (!match_state || (READ_ONCE(p->__state) & match_state))
+ if (READ_ONCE(p->__state) & match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &rf);
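The return convention in the last hunk is worth spelling out: ORing in LONG_MIN forces the MSB on, so a successful wait returns nonzero even for a task whose switch count is still 0, keeping it distinct from the 0 that signals a state mismatch. Below is a toy userspace model of that convention and of the two-call pattern described in the comment above; the state values are illustrative stand-ins and none of the locking or scheduling is modeled.

#include <limits.h>
#include <stdio.h>

#define TASK_INTERRUPTIBLE	0x0001u
#define TASK_UNINTERRUPTIBLE	0x0002u
#define TASK_ANY		(~0u)		/* stand-in: all state bits */

struct toy_task {
	unsigned int __state;	/* blocked state bits, 0 == running */
	unsigned long nvcsw;	/* voluntary context-switch count */
};

/*
 * Models only the match/return convention of wait_task_inactive():
 * 0 when the state no longer matches @match_state, otherwise the
 * switch count with the MSB forced on so it can never be 0.
 */
static unsigned long toy_wait_task_inactive(struct toy_task *p,
					    unsigned int match_state)
{
	if (!(p->__state & match_state))
		return 0;
	return p->nvcsw | LONG_MIN;	/* sets MSB */
}

int main(void)
{
	struct toy_task p = { .__state = TASK_UNINTERRUPTIBLE, .nvcsw = 0 };
	unsigned long ncsw, again;

	ncsw = toy_wait_task_inactive(&p, TASK_UNINTERRUPTIBLE);
	printf("first call:   %#lx (nonzero despite nvcsw == 0)\n", ncsw);

	/* A second call returning the same value proves the task never
	 * scheduled in between. */
	again = toy_wait_task_inactive(&p, TASK_UNINTERRUPTIBLE);
	printf("unchanged:    %d\n", ncsw == again);

	/* After a wakeup no state bit matches, even against TASK_ANY,
	 * and the MSB trick keeps this 0 unambiguous. */
	p.__state = 0;
	printf("after wakeup: %lu\n", toy_wait_task_inactive(&p, TASK_ANY));
	return 0;
}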