From d6531ab6e50149ab2a144b0f4787cb9277d0893f Mon Sep 17 00:00:00 2001
From: Chengming Zhou
Date: Thu, 18 Aug 2022 20:48:04 +0800
Subject: sched/fair: Move task sched_avg attach to enqueue_task_fair()

In wake_up_new_task(), we use post_init_entity_util_avg() to initialize
util_avg/runnable_avg based on the CPU's util_avg at that time, and to
attach the task's sched_avg to the cfs_rq.

Since the enqueue_task_fair() -> enqueue_entity() -> update_load_avg()
loop will do the attach anyway, we can move this work to
update_load_avg().

wake_up_new_task(p)
  post_init_entity_util_avg(p)
    attach_entity_cfs_rq()  --> (1)
  activate_task(rq, p)
    enqueue_task() := enqueue_task_fair()
      enqueue_entity() loop
        update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH)
          if (!se->avg.last_update_time && (flags & DO_ATTACH))
            attach_entity_load_avg()  --> (2)

This patch moves the attach from (1) to (2) and updates the related
comments accordingly.

Signed-off-by: Chengming Zhou
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Vincent Guittot
Link: https://lore.kernel.org/r/20220818124805.601-9-zhouchengming@bytedance.com
---
 kernel/sched/fair.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fd1aa4c92b2d..ef325b561df4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -799,8 +799,6 @@ void init_entity_runnable_average(struct sched_entity *se)
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 
-static void attach_entity_cfs_rq(struct sched_entity *se);
-
 /*
  * With new tasks being created, their initial util_avgs are extrapolated
  * based on the cfs_rq's current util_avg:
@@ -863,8 +861,6 @@ void post_init_entity_util_avg(struct task_struct *p)
 		se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
 		return;
 	}
-
-	attach_entity_cfs_rq(se);
 }
 
 #else /* !CONFIG_SMP */
@@ -3838,8 +3834,7 @@ static void migrate_se_pelt_lag(struct sched_entity *se) {}
  * @cfs_rq: cfs_rq to update
  *
  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
- * avg. The immediate corollary is that all (fair) tasks must be attached, see
- * post_init_entity_util_avg().
+ * avg. The immediate corollary is that all (fair) tasks must be attached.
  *
  * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
  *
@@ -4072,8 +4067,8 @@ static void remove_entity_load_avg(struct sched_entity *se)
 
 	/*
 	 * tasks cannot exit without having gone through wake_up_new_task() ->
-	 * post_init_entity_util_avg() which will have added things to the
-	 * cfs_rq, so we can remove unconditionally.
+	 * enqueue_task_fair() which will have added things to the cfs_rq,
+	 * so we can remove unconditionally.
 	 */
 	sync_entity_load_avg(se);
-- 
cgit v1.2.3-59-g8ed1b
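
For readers unfamiliar with the DO_ATTACH path named at point (2), below
is a minimal sketch of the relevant branch of update_load_avg() in
kernel/sched/fair.c. It is condensed and paraphrased, not copied
verbatim from the tree; the elided steps are marked with comments and
should be treated as assumptions about the surrounding code.

/*
 * Condensed sketch of the DO_ATTACH branch in update_load_avg(),
 * matching the call graph above. An entity that has never been
 * attached (or was detached on migration) has
 * avg.last_update_time == 0, so its first enqueue on a CPU performs
 * the attach at point (2).
 */
static inline void update_load_avg(struct cfs_rq *cfs_rq,
				   struct sched_entity *se, int flags)
{
	u64 now = cfs_rq_clock_pelt(cfs_rq);

	/* Age the entity's own averages only if it is already attached. */
	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
		__update_load_avg_se(now, cfs_rq, se);

	/* ... decay and propagate the cfs_rq averages (elided) ... */

	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
		/*
		 * A newly forked (or newly migrated) task is attached
		 * here on enqueue; this is the work that point (1) used
		 * to do from wake_up_new_task().
		 */
		attach_entity_load_avg(cfs_rq, se);
		update_tg_load_avg(cfs_rq);
	}

	/* ... remaining bookkeeping (elided) ... */
}

Because the attach is keyed off last_update_time == 0 together with
DO_ATTACH, dropping the explicit attach_entity_cfs_rq() call from
post_init_entity_util_avg() is safe: the very next enqueue_task_fair()
of the new task reaches this branch and performs the same attach.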