author    Vincent Guittot <vincent.guittot@linaro.org>	2020-02-24 09:52:17 +0000
committer Ingo Molnar <mingo@kernel.org>	2020-02-24 11:36:36 +0100
commit    0dacee1bfa70e171be3a12a30414c228453048d2
tree      1820b6a3f33b1761994d96f9705a5aba5a4fe03d /kernel/sched/pelt.c
parent    sched/numa: Use similar logic to the load balancer for moving between domains with spare capacity
sched/pelt: Remove unused runnable load average
Now that runnable_load_avg is no longer used, we can remove it to make space for a new signal.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Hillf Danton <hdanton@sina.com>
Link: https://lore.kernel.org/r/20200224095223.13361-8-mgorman@techsingularity.net
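For context, a minimal user-space sketch (not part of the commit) of the averaging step that remains after this change: with runnable_load_{sum,avg} gone, only the load and utilization sums are folded into averages, as in ___update_load_avg() below. The struct pelt_avg type and plain C division standing in for the kernel's div_u64() are simplifications for illustration.

	#include <stdint.h>
	#include <stdio.h>

	/* Constants as used by the kernel's PELT code. */
	#define LOAD_AVG_MAX		47742	/* maximum of a fully decayed *_sum series */
	#define SCHED_CAPACITY_SHIFT	10

	/* Hypothetical stand-in for the relevant fields of struct sched_avg. */
	struct pelt_avg {
		uint64_t	load_sum;
		uint32_t	util_sum;
		uint32_t	period_contrib;
		unsigned long	load_avg;
		unsigned long	util_avg;
	};

	/* Step 2 of a PELT update after this patch: no runnable_load_* pair. */
	static void pelt_update_avg(struct pelt_avg *sa, unsigned long load)
	{
		uint32_t divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;

		sa->load_avg = load * sa->load_sum / divider;	/* div_u64() in the kernel */
		sa->util_avg = sa->util_sum / divider;
	}

	int main(void)
	{
		struct pelt_avg sa = {
			.load_sum = 23000,
			.util_sum = 23000 << SCHED_CAPACITY_SHIFT,
		};

		/* load is se_weight() for a sched_entity, 1 for rq-level signals. */
		pelt_update_avg(&sa, 1024);
		printf("load_avg=%lu util_avg=%lu\n", sa.load_avg, sa.util_avg);
		return 0;
	}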
Diffstat (limited to 'kernel/sched/pelt.c')
-rw-r--r--  kernel/sched/pelt.c | 62
1 file changed, 24 insertions(+), 38 deletions(-)
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index bd006b79b360..3eb0ed333dcb 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -108,7 +108,7 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
*/
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
- unsigned long load, unsigned long runnable, int running)
+ unsigned long load, int running)
{
u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
u64 periods;
@@ -121,8 +121,6 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
*/
if (periods) {
sa->load_sum = decay_load(sa->load_sum, periods);
- sa->runnable_load_sum =
- decay_load(sa->runnable_load_sum, periods);
sa->util_sum = decay_load((u64)(sa->util_sum), periods);
/*
@@ -148,8 +146,6 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
if (load)
sa->load_sum += load * contrib;
- if (runnable)
- sa->runnable_load_sum += runnable * contrib;
if (running)
sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;
@@ -186,7 +182,7 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
*/
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
- unsigned long load, unsigned long runnable, int running)
+ unsigned long load, int running)
{
u64 delta;
@@ -222,7 +218,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
* Also see the comment in accumulate_sum().
*/
if (!load)
- runnable = running = 0;
+ running = 0;
/*
* Now we know we crossed measurement unit boundaries. The *_avg
@@ -231,14 +227,14 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
* Step 1: accumulate *_sum since last_update_time. If we haven't
* crossed period boundaries, finish.
*/
- if (!accumulate_sum(delta, sa, load, runnable, running))
+ if (!accumulate_sum(delta, sa, load, running))
return 0;
return 1;
}
static __always_inline void
-___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
+___update_load_avg(struct sched_avg *sa, unsigned long load)
{
u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
@@ -246,7 +242,6 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
* Step 2: update *_avg.
*/
sa->load_avg = div_u64(load * sa->load_sum, divider);
- sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
@@ -254,17 +249,13 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
* sched_entity:
*
* task:
- * se_runnable() == se_weight()
+ * se_weight() = se->load.weight
*
* group: [ see update_cfs_group() ]
* se_weight() = tg->weight * grq->load_avg / tg->load_avg
- * se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
*
- * load_sum := runnable_sum
- * load_avg = se_weight(se) * runnable_avg
- *
- * runnable_load_sum := runnable_sum
- * runnable_load_avg = se_runnable(se) * runnable_avg
+ * load_sum := runnable
+ * load_avg = se_weight(se) * load_sum
*
* XXX collapse load_sum and runnable_load_sum
*
@@ -272,15 +263,12 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
*
* load_sum = \Sum se_weight(se) * se->avg.load_sum
* load_avg = \Sum se->avg.load_avg
- *
- * runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
- * runnable_load_avg = \Sum se->avg.runable_load_avg
*/
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
- if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
- ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+ if (___update_load_sum(now, &se->avg, 0, 0)) {
+ ___update_load_avg(&se->avg, se_weight(se));
trace_pelt_se_tp(se);
return 1;
}
@@ -290,10 +278,9 @@ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,
- cfs_rq->curr == se)) {
+ if (___update_load_sum(now, &se->avg, !!se->on_rq, cfs_rq->curr == se)) {
- ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+ ___update_load_avg(&se->avg, se_weight(se));
cfs_se_util_change(&se->avg);
trace_pelt_se_tp(se);
return 1;
@@ -306,10 +293,9 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
if (___update_load_sum(now, &cfs_rq->avg,
scale_load_down(cfs_rq->load.weight),
- scale_load_down(cfs_rq->runnable_weight),
cfs_rq->curr != NULL)) {
- ___update_load_avg(&cfs_rq->avg, 1, 1);
+ ___update_load_avg(&cfs_rq->avg, 1);
trace_pelt_cfs_tp(cfs_rq);
return 1;
}
@@ -322,9 +308,9 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
*
* util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
* util_sum = cpu_scale * load_sum
- * runnable_load_sum = load_sum
+ * runnable_sum = util_sum
*
- * load_avg and runnable_load_avg are not supported and meaningless.
+ * load_avg is not supported and meaningless.
*
*/
@@ -332,10 +318,9 @@ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
if (___update_load_sum(now, &rq->avg_rt,
running,
- running,
running)) {
- ___update_load_avg(&rq->avg_rt, 1, 1);
+ ___update_load_avg(&rq->avg_rt, 1);
trace_pelt_rt_tp(rq);
return 1;
}
@@ -348,7 +333,9 @@ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
*
* util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
* util_sum = cpu_scale * load_sum
- * runnable_load_sum = load_sum
+ * runnable_sum = util_sum
+ *
+ * load_avg is not supported and meaningless.
*
*/
@@ -356,10 +343,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
if (___update_load_sum(now, &rq->avg_dl,
running,
- running,
running)) {
- ___update_load_avg(&rq->avg_dl, 1, 1);
+ ___update_load_avg(&rq->avg_dl, 1);
trace_pelt_dl_tp(rq);
return 1;
}
@@ -373,7 +359,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
*
* util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
* util_sum = cpu_scale * load_sum
- * runnable_load_sum = load_sum
+ * runnable_sum = util_sum
+ *
+ * load_avg is not supported and meaningless.
*
*/
@@ -402,15 +390,13 @@ int update_irq_load_avg(struct rq *rq, u64 running)
*/
ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
0,
- 0,
0);
ret += ___update_load_sum(rq->clock, &rq->avg_irq,
1,
- 1,
1);
if (ret) {
- ___update_load_avg(&rq->avg_irq, 1, 1);
+ ___update_load_avg(&rq->avg_irq, 1);
trace_pelt_irq_tp(rq);
}