From c63be7be59de65d12ff7b4329acea99cf734d6de Mon Sep 17 00:00:00 2001
From: Vincent Guittot
Date: Fri, 18 Oct 2019 15:26:35 +0200
Subject: sched/fair: Use utilization to select misfit task

Utilization is used to detect a misfit task, but the load is then used to
select the task on the CPU, which can lead to selecting a small task with
a high weight instead of the task that triggered the misfit migration.

Check that the task can't fit the CPU's capacity when selecting the misfit
task, instead of using the load.

Signed-off-by: Vincent Guittot
Acked-by: Valentin Schneider
Cc: Ben Segall
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Mike Galbraith
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra
Cc: Steven Rostedt
Cc: Thomas Gleixner
Cc: hdanton@sina.com
Cc: parth@linux.ibm.com
Cc: pauld@redhat.com
Cc: quentin.perret@arm.com
Cc: riel@surriel.com
Cc: srikar@linux.vnet.ibm.com
Link: https://lkml.kernel.org/r/1571405198-27570-9-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

(limited to 'kernel/sched')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f489f603f317..1fd6f3917fe7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7408,13 +7408,8 @@ static int detach_tasks(struct lb_env *env)
 			break;
 
 		case migrate_misfit:
-			load = task_h_load(p);
-
-			/*
-			 * Load of misfit task might decrease a bit since it has
-			 * been recorded. Be conservative in the condition.
-			 */
-			if (load/2 < env->imbalance)
+			/* This is not a misfit task */
+			if (task_fits_capacity(p, capacity_of(env->src_cpu)))
 				goto next;
 
 			env->imbalance = 0;
@@ -8358,7 +8353,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	if (busiest->group_type == group_misfit_task) {
 		/* Set imbalance to allow misfit tasks to be balanced. */
 		env->migration_type = migrate_misfit;
-		env->imbalance = busiest->group_misfit_task_load;
+		env->imbalance = 1;
 		return;
 	}
 
--
cgit v1.2.3-59-g8ed1b
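
Editor's note: for readers outside the kernel tree, below is a minimal, self-contained C sketch of the idea the patch switches to: picking the task whose utilization does not fit the CPU's capacity, rather than the task with the highest load. This is not the kernel implementation; the struct layout, helper names and the ~80% capacity margin are illustrative assumptions that only mirror the shape of the task_fits_capacity()/capacity_of() check used in the hunk above.

/*
 * Standalone sketch (not kernel code): select the misfit task by checking
 * utilization against CPU capacity, instead of comparing weighted load.
 *
 * Assumptions: the 80% margin, the struct layout and the helper names are
 * illustrative only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task {
	const char *name;
	unsigned long util;	/* utilization estimate of the task */
	unsigned long load;	/* weighted load (what the old code compared) */
};

/* A task "fits" if its utilization leaves ~20% headroom on the CPU. */
static bool task_fits(const struct task *p, unsigned long cpu_capacity)
{
	return p->util * 1280 < cpu_capacity * 1024;
}

/* Return the first task that cannot fit the CPU, or NULL if all fit. */
static const struct task *pick_misfit(const struct task *tasks, size_t n,
				      unsigned long cpu_capacity)
{
	for (size_t i = 0; i < n; i++) {
		if (!task_fits(&tasks[i], cpu_capacity))
			return &tasks[i];
	}
	return NULL;
}

int main(void)
{
	/* A small CPU, e.g. capacity 430 out of a nominal 1024. */
	unsigned long cpu_capacity = 430;

	/* A small but heavily weighted task, and the actual misfit task. */
	struct task tasks[] = {
		{ "small-high-weight", 100, 8000 },
		{ "misfit",            600, 1024 },
	};

	const struct task *p = pick_misfit(tasks, 2, cpu_capacity);

	printf("misfit task: %s\n", p ? p->name : "none");
	return 0;
}

With a load-based condition, the heavily weighted small task above could be detached even though it fits the small CPU; checking utilization against capacity selects the task that actually triggered the misfit migration, which is what the patch's change to detach_tasks() achieves.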