author     Vincent Guittot <vincent.guittot@linaro.org>  2019-10-18 15:26:34 +0200
committer  Ingo Molnar <mingo@kernel.org>                2019-10-21 09:40:54 +0200
commit     2ab4092fc82d6001fdd9d51dbba27d04dec967e0 (patch)
tree       cc3411cf40c445f3ea352ae2f86a1c13e1bcbd57 /kernel/sched
parent     sched/fair: Use load instead of runnable load in load_balance() (diff)
download   wireguard-linux-2ab4092fc82d6001fdd9d51dbba27d04dec967e0.tar.xz
           wireguard-linux-2ab4092fc82d6001fdd9d51dbba27d04dec967e0.zip
sched/fair: Spread out tasks evenly when not overloaded
When there is only one CPU per group, using the idle CPUs to evenly spread
tasks doesn't make sense and nr_running is a better metric.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: hdanton@sina.com
Cc: parth@linux.ibm.com
Cc: pauld@redhat.com
Cc: quentin.perret@arm.com
Cc: riel@surriel.com
Cc: srikar@linux.vnet.ibm.com
Cc: valentin.schneider@arm.com
Link: https://lkml.kernel.org/r/1571405198-27570-8-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
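To make the new decision flow easy to follow outside the kernel tree, below is
a minimal, self-contained C sketch of the checks this patch adds to
find_busiest_group(). The group_stats struct and the should_balance() helper
are hypothetical stand-ins for illustration only; the field names
(group_type, group_weight, idle_cpus, sum_h_nr_running) mirror the
sg_lb_stats fields used in the diff below, but this is not the kernel code
itself.

    #include <stdio.h>

    /* Hypothetical mirror of the sg_lb_stats fields used in the diff. */
    enum group_type { GROUP_HAS_SPARE, GROUP_OVERLOADED };

    struct group_stats {
            enum group_type group_type;
            unsigned int group_weight;     /* number of CPUs in the group */
            unsigned int idle_cpus;
            unsigned int sum_h_nr_running; /* runnable CFS tasks */
    };

    /* Returns 1 to balance, 0 to bail out, mimicking the patched logic. */
    static int should_balance(int this_cpu_idle,
                              const struct group_stats *local,
                              const struct group_stats *busiest)
    {
            if (busiest->group_type != GROUP_OVERLOADED) {
                    /* A busy CPU leaves the pull to an idle CPU. */
                    if (!this_cpu_idle)
                            return 0;
                    /*
                     * With more than one CPU per group, an idle-CPU
                     * difference of at most 1 counts as balanced.
                     */
                    if (busiest->group_weight > 1 &&
                        local->idle_cpus <= busiest->idle_cpus + 1)
                            return 0;
                    /* Nothing to pull if busiest runs a single task. */
                    if (busiest->sum_h_nr_running == 1)
                            return 0;
            }
            return 1; /* compute and fix the imbalance */
    }

    int main(void)
    {
            /* Two single-CPU groups: 2 tasks vs. 0 tasks -> balance. */
            struct group_stats local   = { GROUP_HAS_SPARE, 1, 1, 0 };
            struct group_stats busiest = { GROUP_HAS_SPARE, 1, 0, 2 };

            printf("balance: %d\n", should_balance(1, &local, &busiest));
            return 0;
    }

The group_weight > 1 guard is the heart of the patch: the idle-CPU comparison
only makes sense when a group has several CPUs, so single-CPU groups fall
through to the nr_running-based checks instead.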
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c  40
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e6a3db08481c..f489f603f317 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8591,18 +8591,34 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
busiest->sum_nr_running > local->sum_nr_running + 1)
goto force_balance;
- if (busiest->group_type != group_overloaded &&
- (env->idle == CPU_NOT_IDLE ||
- local->idle_cpus <= (busiest->idle_cpus + 1)))
- /*
- * If the busiest group is not overloaded
- * and there is no imbalance between this and busiest group
- * wrt. idle CPUs, it is balanced. The imbalance
- * becomes significant if the diff is greater than 1 otherwise
- * we might end up just moving the imbalance to another
- * group.
- */
- goto out_balanced;
+ if (busiest->group_type != group_overloaded) {
+ if (env->idle == CPU_NOT_IDLE)
+ /*
+ * If the busiest group is not overloaded (and as a
+ * result the local one too) but this CPU is already
+ * busy, let another idle CPU try to pull a task.
+ */
+ goto out_balanced;
+
+ if (busiest->group_weight > 1 &&
+ local->idle_cpus <= (busiest->idle_cpus + 1))
+ /*
+ * If the busiest group is not overloaded
+ * and there is no imbalance between this and busiest
+ * group wrt idle CPUs, it is balanced. The imbalance
+ * becomes significant if the diff is greater than 1
+ * otherwise we might end up just moving the imbalance
+ * to another group. Of course this applies only if
+ * there is more than 1 CPU per group.
+ */
+ goto out_balanced;
+
+ if (busiest->sum_h_nr_running == 1)
+ /*
+ * busiest doesn't have any tasks waiting to run
+ */
+ goto out_balanced;
+ }
force_balance:
/* Looks like there is an imbalance. Compute it */