author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-05-11 00:26:27 +0200
committer	Ingo Molnar <mingo@kernel.org>	2012-05-14 15:05:26 +0200
commit	870a0bb5d636156502769233d02a0d5791d4366a
tree	c008809e111a3487af2c27f8c105200950f11671 /kernel/sched
parent	sched/fair: Revert sched-domain iteration breakage
sched/numa: Don't scale the imbalance
It's far too easy to get a ridiculously large imbalance pct when you
scale it like that. Use a fixed 125% for now.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-zsriaft1dv7hhboyrpvqjy6s@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 24922b7ff567..6883d998dc38 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6261,11 +6261,6 @@ static int *sched_domains_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 
-static inline unsigned long numa_scale(unsigned long x, int level)
-{
-	return x * sched_domains_numa_distance[level] / sched_domains_numa_scale;
-}
-
 static inline int sd_local_flags(int level)
 {
 	if (sched_domains_numa_distance[level] > REMOTE_DISTANCE)
@@ -6286,7 +6281,7 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
 		.min_interval		= sd_weight,
 		.max_interval		= 2*sd_weight,
 		.busy_factor		= 32,
-		.imbalance_pct		= 100 + numa_scale(25, level),
+		.imbalance_pct		= 125,
 		.cache_nice_tries	= 2,
 		.busy_idx		= 3,
 		.idle_idx		= 2,
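
For context, a minimal standalone userspace sketch (not kernel code) of the
arithmetic the removed numa_scale() performed. It assumes
sched_domains_numa_scale equals the local node distance (typically 10) and
uses a hypothetical distance table for a four-level NUMA machine; with those
assumptions the old formula 100 + numa_scale(25, level) yields 150, 200, 300
and 500, versus the fixed 125 this patch uses.

#include <stdio.h>

/* Assumed: scale is the local node distance, normally 10. */
static const int sched_domains_numa_scale = 10;

/* Hypothetical per-level distances on a large NUMA box. */
static const int sched_domains_numa_distance[] = { 20, 40, 80, 160 };

/* The scaling helper this patch removes. */
static unsigned long numa_scale(unsigned long x, int level)
{
	return x * sched_domains_numa_distance[level] / sched_domains_numa_scale;
}

int main(void)
{
	/* Old per-level imbalance_pct vs. the new fixed value. */
	for (int level = 0; level < 4; level++)
		printf("level %d: old imbalance_pct = %lu, new = 125\n",
		       level, 100 + numa_scale(25, level));
	return 0;
}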