author     Peter Zijlstra <peterz@infradead.org>   2015-09-09 09:06:17 +0200
committer  Ingo Molnar <mingo@kernel.org>          2015-09-13 09:53:02 +0200
commit     006cdf025a33cb008c3d466bed311c2c347b458f (patch)
tree       93514a7de219a4fddf47cfeac20010f7f4912f6f /kernel/sched/fair.c
parent     sched/fair: Defer calling scaling functions (diff)
sched/fair: Optimize per entity utilization tracking
Currently the load_{sum,avg} and util_{sum,avg} tracking is asymmetric in that load tracking gets a 2^10 unit from the weight, but util gets no such factor. This results in more lost bits for util scaling and asymmetric scaling rules.

Fix this by removing shifts, such that we gain the 2^10 factor from scaling. There is no risk of overflowing the u32 as the max value is now LOAD_AVG_MAX << 10, which is still well below UINT_MAX.

This further entangles the assumption that both LOAD and CAPACITY shifts are the same (and 10), so put in an assertion for that.

This fixes the math for the LOAD_RESOLUTION != 0 case.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
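As a quick sanity check of the overflow claim, here is a minimal user-space sketch (not part of the patch; LOAD_AVG_MAX == 47742 is assumed from kernels of this era) showing that the new util_sum ceiling of LOAD_AVG_MAX << 10 still fits in a u32:

/*
 * User-space check of the overflow argument above; not kernel code.
 * Assumes LOAD_AVG_MAX == 47742 and the 2^10 capacity unit.
 */
#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX		47742	/* assumed: max decayed geometric sum */
#define SCHED_CAPACITY_SHIFT	10	/* 2^10 capacity/utilization unit */

int main(void)
{
	/* util_sum now accumulates contrib * scale_cpu, so its ceiling
	 * becomes LOAD_AVG_MAX << 10 instead of LOAD_AVG_MAX. */
	uint64_t max_util_sum = (uint64_t)LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT;

	printf("max util_sum = %llu, UINT32_MAX = %llu\n",
	       (unsigned long long)max_util_sum,
	       (unsigned long long)UINT32_MAX);

	/* 47742 << 10 = 48887808, comfortably below UINT32_MAX. */
	return max_util_sum < UINT32_MAX ? 0 : 1;
}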
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c | 17
1 file changed, 10 insertions, 7 deletions
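For orientation before the diff, a simplified stand-alone sketch of the accumulation and averaging the hunks below change; the names only loosely mirror the kernel's __update_load_avg(), and the constant is assumed rather than taken from this tree:

/*
 * Simplified user-space sketch (not kernel code) of util_{sum,avg}
 * after this patch: util_sum keeps the full 2^10 capacity unit, so
 * the average is a plain division by LOAD_AVG_MAX with no shift.
 */
#include <stdint.h>

#define LOAD_AVG_MAX	47742		/* assumed v4.3-era constant */

struct toy_sched_avg {
	uint32_t util_sum;		/* now carries the 2^10 unit */
	unsigned long util_avg;		/* in 2^10 capacity units */
};

/* contrib: decayed running time; scale_cpu: CPU capacity in 2^10 units */
static void toy_accumulate(struct toy_sched_avg *sa, uint32_t contrib,
			   unsigned long scale_cpu)
{
	/* old: sa->util_sum += cap_scale(contrib, scale_cpu);
	 * new: keep the unit, matching load_sum's 2^10 weight unit */
	sa->util_sum += contrib * scale_cpu;
}

static void toy_average(struct toy_sched_avg *sa)
{
	/* old: (util_sum << SCHED_LOAD_SHIFT) / LOAD_AVG_MAX */
	sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
}

int main(void)
{
	struct toy_sched_avg sa = { 0, 0 };

	/* e.g. one fully-busy period on a full-capacity CPU */
	toy_accumulate(&sa, 1024, 1024);
	toy_average(&sa);
	return sa.util_avg > 1024;	/* util_avg stays within the 2^10 scale */
}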
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fc835fa23308..9176f7c588a8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -682,7 +682,7 @@ void init_entity_runnable_average(struct sched_entity *se)
sa->load_avg = scale_load_down(se->load.weight);
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
- sa->util_sum = LOAD_AVG_MAX;
+ sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
}
@@ -2515,6 +2515,10 @@ static u32 __compute_runnable_contrib(u64 n)
return contrib + runnable_avg_yN_sum[n];
}
+#if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
+#error "load tracking assumes 2^10 as unit"
+#endif
+
#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
/*
@@ -2599,7 +2603,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
}
}
if (running)
- sa->util_sum += cap_scale(scaled_delta_w, scale_cpu);
+ sa->util_sum += scaled_delta_w * scale_cpu;
delta -= delta_w;
@@ -2623,7 +2627,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
cfs_rq->runnable_load_sum += weight * contrib;
}
if (running)
- sa->util_sum += cap_scale(contrib, scale_cpu);
+ sa->util_sum += contrib * scale_cpu;
}
/* Remainder of delta accrued against u_0` */
@@ -2634,7 +2638,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
cfs_rq->runnable_load_sum += weight * scaled_delta;
}
if (running)
- sa->util_sum += cap_scale(scaled_delta, scale_cpu);
+ sa->util_sum += scaled_delta * scale_cpu;
sa->period_contrib += delta;
@@ -2644,7 +2648,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
cfs_rq->runnable_load_avg =
div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
}
- sa->util_avg = (sa->util_sum << SCHED_LOAD_SHIFT) / LOAD_AVG_MAX;
+ sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
}
return decayed;
@@ -2686,8 +2690,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
if (atomic_long_read(&cfs_rq->removed_util_avg)) {
long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
sa->util_avg = max_t(long, sa->util_avg - r, 0);
- sa->util_sum = max_t(s32, sa->util_sum -
- ((r * LOAD_AVG_MAX) >> SCHED_LOAD_SHIFT), 0);
+ sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
}
decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,