-rw-r--r--  kernel/sched/core.c  | 8
-rw-r--r--  kernel/sched/sched.h | 3
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index eee4ee655db2..b64f163d512c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1953,7 +1953,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	while (p->on_cpu)
 		cpu_relax();
 	/*
-	 * Pairs with the smp_wmb() in finish_lock_switch().
+	 * Combined with the control dependency above, we have an effective
+	 * smp_load_acquire() without the need for full barriers.
+	 *
+	 * Pairs with the smp_store_release() in finish_lock_switch().
+	 *
+	 * This ensures that tasks getting woken will be fully ordered against
+	 * their previous state and preserve Program Order.
 	 */
 	smp_rmb();
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efd3bfc7e347..b242775bf670 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
 	 *
+	 * In particular, the load of prev->state in finish_task_switch() must
+	 * happen before this.
+	 *
 	 * Pairs with the control dependency and rmb in try_to_wake_up().
 	 */
 	smp_store_release(&prev->on_cpu, 0);
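
The two comments above describe a release/acquire handoff: finish_lock_switch() publishes "the switch is done" with a release store to prev->on_cpu, and try_to_wake_up() consumes it by spinning on p->on_cpu (a control dependency) followed by smp_rmb(), which together act as a load-acquire. Below is a minimal user-space sketch of the same pattern, not taken from the patch: C11 atomics and pthreads stand in for the kernel primitives, and the names on_cpu, prev_state, switch_out() and waker() are illustrative only.

/*
 * Sketch of the release/acquire pairing described in the comments above,
 * using C11 atomics as a user-space stand-in for the kernel barriers.
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_int on_cpu = 1;   /* stand-in for prev->on_cpu              */
static int prev_state;          /* plain data written before the release  */

/* Release side, mirroring finish_lock_switch(): everything done before
 * the release store (here the write to prev_state) is visible to any
 * thread that observes on_cpu == 0 with acquire ordering.                */
static void *switch_out(void *arg)
{
	(void)arg;
	prev_state = 42;
	atomic_store_explicit(&on_cpu, 0, memory_order_release);
	return NULL;
}

/* Acquire side, mirroring try_to_wake_up(): spin until the flag drops
 * (control dependency), then a single acquire fence plays the role of
 * smp_rmb(), so the later read of prev_state cannot observe a value
 * older than the one published before the release store.                 */
static void *waker(void *arg)
{
	(void)arg;
	while (atomic_load_explicit(&on_cpu, memory_order_relaxed))
		;                       /* cpu_relax() loop in the kernel  */
	atomic_thread_fence(memory_order_acquire);
	assert(prev_state == 42);       /* guaranteed by the pairing       */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, switch_out, NULL);
	pthread_create(&b, NULL, waker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Note that only the final observed value of the flag needs acquire semantics, so a relaxed spin followed by one fence after the loop is enough; that is the "control dependency plus smp_rmb()" shape the patch comment calls an effective smp_load_acquire().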