author    Peter Zijlstra <peterz@infradead.org>    2020-07-22 10:22:02 +0200
committer Peter Zijlstra <peterz@infradead.org>    2020-07-22 10:22:02 +0200
commit    015dc08918785201199ed3450c22bb8939f09dfe (patch)
tree      7ba52e0b1e518fa750aaac0c1da8dd70c3eca1eb /kernel/sched
parent    sched: Add a tracepoint to track rq->nr_running (diff)
parent    sched: Fix race against ptrace_freeze_traced() (diff)
Merge branch 'sched/urgent'
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c  24
-rw-r--r--  kernel/sched/fair.c  15
2 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ff0519551188..08d02ce26b71 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4193,9 +4193,6 @@ static void __sched notrace __schedule(bool preempt)
local_irq_disable();
rcu_note_context_switch(preempt);
- /* See deactivate_task() below. */
- prev_state = prev->state;
-
/*
* Make sure that signal_pending_state()->signal_pending() below
* can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
* done by the caller to avoid the race with signal_wake_up().
@@ -4219,11 +4216,16 @@ static void __sched notrace __schedule(bool preempt)
update_rq_clock(rq);
switch_count = &prev->nivcsw;
+
/*
- * We must re-load prev->state in case ttwu_remote() changed it
- * before we acquired rq->lock.
+ * We must load prev->state once (task_struct::state is volatile), such
+ * that:
+ *
+ * - we form a control dependency vs deactivate_task() below.
+ * - ptrace_{,un}freeze_traced() can change ->state underneath us.
*/
- if (!preempt && prev_state && prev_state == prev->state) {
+ prev_state = prev->state;
+ if (!preempt && prev_state) {
if (signal_pending_state(prev_state, prev)) {
prev->state = TASK_RUNNING;
} else {
@@ -4237,10 +4239,12 @@ static void __sched notrace __schedule(bool preempt)
/*
* __schedule()                          ttwu()
- * prev_state = prev->state;            if (READ_ONCE(p->on_rq) && ...)
- *   LOCK rq->lock                        goto out;
- *   smp_mb__after_spinlock();           smp_acquire__after_ctrl_dep();
- *   p->on_rq = 0;                       p->state = TASK_WAKING;
+ * prev_state = prev->state;            if (p->on_rq && ...)
+ *   if (prev_state)                      goto out;
+ *     p->on_rq = 0;                    smp_acquire__after_ctrl_dep();
+ *                                      p->state = TASK_WAKING
+ *
+ * Where __schedule() and ttwu() have matching control dependencies.
*
* After this, schedule() must not care about p->state any more.
*/
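
To make the two-column diagram above concrete, here is a minimal userspace sketch of the same ordering pattern, assuming C11 atomics and pthreads rather than the kernel's primitives. All names (struct task, schedule_side, ttwu_side) are invented for illustration, and explicit release/acquire ordering stands in for the control dependencies and smp_acquire__after_ctrl_dep() that the kernel relies on; treat it as a model of the pattern, not as the scheduler's actual code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_WAKING		2

struct task {
	atomic_long state;
	atomic_int  on_rq;
};

static struct task t = { TASK_INTERRUPTIBLE, 1 };

/* __schedule() side: load ->state exactly once, and only dequeue
 * (on_rq = 0) when that single load saw a sleeping state. */
static void *schedule_side(void *arg)
{
	(void)arg;
	long prev_state = atomic_load_explicit(&t.state, memory_order_relaxed);

	if (prev_state)	/* branch on the one and only load */
		atomic_store_explicit(&t.on_rq, 0, memory_order_release);
	return NULL;
}

/* ttwu() side: write ->state only after observing on_rq == 0. */
static void *ttwu_side(void *arg)
{
	(void)arg;
	if (atomic_load_explicit(&t.on_rq, memory_order_acquire))
		return NULL;	/* still queued: the wakeup takes the early path */

	atomic_store_explicit(&t.state, TASK_WAKING, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, schedule_side, NULL);
	pthread_create(&b, NULL, ttwu_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("state=%ld on_rq=%d\n",
	       atomic_load_explicit(&t.state, memory_order_relaxed),
	       atomic_load_explicit(&t.on_rq, memory_order_relaxed));
	return 0;
}

However the race resolves, the pairing guarantees that either schedule_side sees the final state before dequeueing, or ttwu_side still sees on_rq set and backs off, so neither side acts on stale data.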
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3213cb247aff..98a53a2fe354 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4047,7 +4047,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
return;
}
- rq->misfit_task_load = task_h_load(p);
+ /*
+ * Make sure that misfit_task_load will not be zero even if
+ * task_h_load() returns 0.
+ */
+ rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
}
#else /* CONFIG_SMP */
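
For intuition on how task_h_load() can reach 0 in the first place: the hierarchical load is propagated by integer multiplication and division down the cgroup tree, so a group with a tiny share on a busy runqueue truncates to zero after a level or two. The sketch below uses an invented h_load_level() helper and made-up numbers; the real computation walks the cfs_rq hierarchy.

#include <stdio.h>

/* Simplified, invented model of one level of hierarchical load
 * propagation: the parent contribution scaled by this entity's
 * share of its cfs_rq, in integer arithmetic. */
static unsigned long h_load_level(unsigned long parent_h_load,
				  unsigned long se_load,
				  unsigned long cfs_rq_load)
{
	return parent_h_load * se_load / cfs_rq_load;
}

int main(void)
{
	/* A low-weight group (share 2) on a busy rq (load 2048),
	 * nested two levels deep: each division truncates. */
	unsigned long h_load = 1024;

	h_load = h_load_level(h_load, 2, 2048);	/* 1024 * 2 / 2048 = 1 */
	h_load = h_load_level(h_load, 2, 2048);	/* 1 * 2 / 2048 = 0 */

	/* The patch's clamp, spelled without max_t(): */
	unsigned long load = h_load ? h_load : 1;

	printf("h_load=%lu clamped load=%lu\n", h_load, load);
	return 0;
}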
@@ -7646,7 +7650,14 @@ static int detach_tasks(struct lb_env *env)
switch (env->migration_type) {
case migrate_load:
- load = task_h_load(p);
+ /*
+ * Depending on the number of CPUs and tasks and the
+ * cgroup hierarchy, task_h_load() can return a zero
+ * value. Make sure that env->imbalance decreases,
+ * otherwise detach_tasks() will stop only after
+ * detaching up to loop_max tasks.
+ */
+ load = max_t(unsigned long, task_h_load(p), 1);
if (sched_feat(LB_MIN) &&
load < 16 && !env->sd->nr_balance_failed)
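
To see why the clamp matters here, a toy model of the migrate_load case: if every scanned task reports a load of 0, env->imbalance never decreases and the loop only terminates after loop_max iterations, detaching tasks that move no load. All names and numbers below are illustrative, and the real loop's LB_MIN and load/2 > imbalance checks are reduced to a single comparison.

#include <stdio.h>

#define LOOP_MAX 32	/* stand-in for env->loop_max */

/* Pathological case: every task reports zero hierarchical load. */
static unsigned long task_h_load_stub(void)
{
	return 0;
}

static int detach_tasks_model(unsigned long imbalance, int clamp)
{
	int detached = 0;

	for (int loop = 0; loop < LOOP_MAX && imbalance; loop++) {
		unsigned long load = task_h_load_stub();

		if (clamp && !load)
			load = 1;	/* the patch's max_t(..., 1) */
		if (load > imbalance)	/* crude stand-in for load/2 > imbalance */
			continue;

		imbalance -= load;	/* with load == 0 this never progresses */
		detached++;
	}
	return detached;
}

int main(void)
{
	printf("without clamp: %d tasks detached\n", detach_tasks_model(4, 0));
	printf("with clamp:    %d tasks detached\n", detach_tasks_model(4, 1));
	return 0;
}

With the clamp the model detaches 4 tasks and stops as soon as the imbalance is paid off; without it, all 32 iterations run while the imbalance stays untouched.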