author     Chengming Zhou <zhouchengming@bytedance.com>	2022-08-18 20:47:57 +0800
committer  Peter Zijlstra <peterz@infradead.org>	2022-08-23 11:01:17 +0200
commit     78b6b15770618efb60d84e2d605f6b93dc94051b (patch)
tree       88e2e48523e6e4f96f5d5a228594df6d9e19da46 /kernel/sched
parent     sched/core: Remove superfluous semicolon (diff)
sched/fair: Maintain task se depth in set_task_rq()
Previously we only maintained task se depth in task_move_group_fair(): if a
!fair task changed task group, its se depth would not be updated. Commit
eb7a59b2c888 ("sched/fair: Reset se-depth when task switched to FAIR") fixed
that by also updating se depth in switched_to_fair(). Then commit daa59407b558
("sched/fair: Unify switched_{from,to}_fair() and task_move_group_fair()")
unified those two functions and moved the se.depth setting into
attach_task_cfs_rq(), which was moved further into attach_entity_cfs_rq() by
commit df217913e72e ("sched/fair: Factorize attach/detach entity").

This patch moves task se depth maintenance from attach_entity_cfs_rq() to
set_task_rq(), which is called whenever the task's CPU or cgroup changes, so
the depth will always be correct.

This patch is preparation for the next patch.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20220818124805.601-2-zhouchengming@bytedance.com
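For context, se->depth simply records how deeply a sched_entity is nested in
the task-group hierarchy: 0 for a root-level entity, parent depth plus one
otherwise, which is exactly the rule the sched.h hunk below adds to
set_task_rq(). The following stand-alone sketch only illustrates that rule;
the struct and helper names (entity, entity_set_parent) are simplified
stand-ins, not the kernel's actual types.

/*
 * Illustrative sketch only: `struct entity` and entity_set_parent()
 * are simplified stand-ins for the kernel's sched_entity handling.
 */
#include <stdio.h>

struct entity {
	struct entity *parent;	/* NULL for a root-level entity */
	int depth;		/* 0 at the root, +1 per nesting level */
};

/* Same rule as the new line in set_task_rq(): depth follows the parent. */
void entity_set_parent(struct entity *se, struct entity *parent)
{
	se->parent = parent;
	se->depth = parent ? parent->depth + 1 : 0;
}

int main(void)
{
	struct entity root_grp, child_grp, task_se;

	entity_set_parent(&root_grp, NULL);		/* depth 0 */
	entity_set_parent(&child_grp, &root_grp);	/* depth 1 */
	entity_set_parent(&task_se, &child_grp);	/* depth 2 */

	printf("task se depth = %d\n", task_se.depth);
	return 0;
}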
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	8
-rw-r--r--	kernel/sched/sched.h	1
2 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index da388657d5ac..a3b0f8b1029e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11562,14 +11562,6 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	/*
-	 * Since the real-depth could have been changed (only FAIR
-	 * class maintain depth value), reset depth properly.
-	 */
-	se->depth = se->parent ? se->parent->depth + 1 : 0;
-#endif
-
 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
 	attach_entity_load_avg(cfs_rq, se);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3ccd35c22f0f..4c4822141026 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1930,6 +1930,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
 	p->se.cfs_rq = tg->cfs_rq[cpu];
 	p->se.parent = tg->se[cpu];
+	p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
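Why a stale depth matters: when the fair scheduler compares two entities that
may live in different cgroups, it first walks them up to a common level of the
hierarchy (see find_matching_se() in kernel/sched/fair.c), and that walk is
driven by se->depth. The sketch below is a simplified, hypothetical version of
such a walk, not the kernel's exact code; it assumes both entities ultimately
share a common ancestor, as entities in the runqueue hierarchy do.

/*
 * Simplified illustration (hypothetical helper, not kernel code) of a
 * depth-driven walk to a common hierarchy level, loosely modeled on
 * find_matching_se(). Assumes both entities share a common ancestor,
 * so the lockstep climb below always terminates.
 */
struct entity {
	struct entity *parent;	/* NULL at the root */
	int depth;		/* must be kept consistent with `parent` */
};

void walk_to_matching_level(struct entity **se, struct entity **pse)
{
	/* First equalize depths... */
	while ((*se)->depth > (*pse)->depth)
		*se = (*se)->parent;
	while ((*pse)->depth > (*se)->depth)
		*pse = (*pse)->parent;

	/* ...then climb in lockstep until both share the same parent. */
	while ((*se)->parent != (*pse)->parent) {
		*se = (*se)->parent;
		*pse = (*pse)->parent;
	}
}

If se->depth were left stale after a cgroup change, the depth-equalizing loops
above would stop at the wrong level, which is the failure mode this patch
removes by keeping depth up to date in set_task_rq() itself.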