author     K Prateek Nayak <kprateek.nayak@amd.com>	2024-12-23 04:34:06 +0000
committer  Peter Zijlstra <peterz@infradead.org>	2025-01-13 14:10:25 +0100
commit     3229adbe787534b43430f92e10175c9b77f2d27c
tree       3fb269c3709b8d4c647758e6a41940f6444ebb2a
parent     sched/fair: Do not compute NUMA Balancing stats unnecessarily during lb
sched/fair: Do not compute overloaded status unnecessarily during lb
Only set sg_overloaded when computing sg_lb_stats() at the highest sched domain, since rd->overloaded status is updated only when load balancing at the highest domain.

While at it, move the setting of sg_overloaded below the idle_cpu() check, since an idle CPU can never be overloaded.

Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20241223043407.1611-8-kprateek.nayak@amd.com
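For context on why the lower-domain work can be skipped: the overload status gathered per group is only consumed when the balance runs at the topmost sched domain, where it is written into the root domain. The snippet below is an illustrative sketch of that consumer path at the end of update_sd_lb_stats(), simplified and not part of this patch; the exact helper names in kernel/sched/fair.c may differ.

	/*
	 * Illustrative sketch (not part of this patch): the aggregated
	 * overload status only reaches the root domain when the current
	 * sched domain has no parent, i.e. when load balancing at the
	 * highest domain. Computing sg_overloaded at lower domains is
	 * therefore wasted work.
	 */
	if (!env->sd->parent) {
		/* Only the root-domain balance updates rd->overloaded */
		WRITE_ONCE(env->dst_rq->rd->overloaded, sg_overloaded);
	}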
Diffstat
-rw-r--r--	kernel/sched/fair.c	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 650d698244c4..98ac49ce78ea 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10410,6 +10410,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 					      bool *sg_overutilized)
 {
 	int i, nr_running, local_group, sd_flags = env->sd->flags;
+	bool balancing_at_rd = !env->sd->parent;
 
 	memset(sgs, 0, sizeof(*sgs));
 
@@ -10427,9 +10428,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		nr_running = rq->nr_running;
 		sgs->sum_nr_running += nr_running;
 
-		if (nr_running > 1)
-			*sg_overloaded = 1;
-
 		if (cpu_overutilized(i))
 			*sg_overutilized = 1;
 
@@ -10442,6 +10440,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			continue;
 		}
 
+		/* Overload indicator is only updated at root domain */
+		if (balancing_at_rd && nr_running > 1)
+			*sg_overloaded = 1;
+
 #ifdef CONFIG_NUMA_BALANCING
 		/* Only fbq_classify_group() uses this to classify NUMA groups */
 		if (sd_flags & SD_NUMA) {
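Putting the hunks together, the per-CPU loop in update_sg_lb_stats() ends up roughly as in the following condensed sketch (not verbatim kernel code; the load/util accounting and later misfit handling are elided): idle CPUs are skipped before the overload check, and the check itself only runs when balancing at the root domain.

	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
		struct rq *rq = cpu_rq(i);

		/* ... load/util accumulation elided ... */
		nr_running = rq->nr_running;
		sgs->sum_nr_running += nr_running;

		if (cpu_overutilized(i))
			*sg_overutilized = 1;

		/* No need to call idle_cpu() if nr_running is not 0 */
		if (!nr_running && idle_cpu(i)) {
			sgs->idle_cpus++;
			/* An idle CPU can never be overloaded: nothing more to do */
			continue;
		}

		/* Overload indicator is only updated at root domain */
		if (balancing_at_rd && nr_running > 1)
			*sg_overloaded = 1;

		/* ... NUMA stats, misfit-task handling, etc. follow ... */
	}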