author		Paul Turner <pjt@google.com>	2010-11-15 15:47:08 -0800
committer	Ingo Molnar <mingo@elte.hu>	2010-11-18 13:27:49 +0100
commit		3b3d190ec3683d568fd2ebaead5e1ec7f97b6e37 (patch)
tree		6839bc473200dcb69c5de998921684ac569ce18b /kernel
parent		sched: Update shares on idle_balance (diff)
download	linux-dev-3b3d190ec3683d568fd2ebaead5e1ec7f97b6e37.tar.xz
		linux-dev-3b3d190ec3683d568fd2ebaead5e1ec7f97b6e37.zip
sched: Implement demand based update_cfs_load()
When the system is busy, dilation of rq->next_balance makes lb->update_shares()
insufficiently frequent for threads which don't sleep (no dequeue/enqueue updates).
Adjust for this by making demand based updates based on the accumulation of
execution time sufficient to wrap our averaging window.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.291159744@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
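The idea of the change, restated: a CPU-bound thread never goes through dequeue/enqueue, so the share updates that normally piggyback on those events can go stale while the system is busy. The patch therefore accumulates executed time in load_unacc_exec_time and forces an update once enough runtime has passed to wrap the averaging window. Below is a minimal standalone sketch of that check, not the kernel code itself; the struct, function names, and the fixed 10ms window constant are illustrative stand-ins (the patch itself compares against the sysctl_sched_shares_window tunable).

	#include <stdint.h>

	#define SHARES_WINDOW_NS 10000000ULL	/* assumed ~10ms averaging window */

	struct cfs_rq_sketch {
		uint64_t load_unacc_exec_time;	/* runtime not yet folded into the load average */
	};

	/* Stand-in for update_cfs_load(): fold elapsed time into the load
	 * average and clear the unaccounted-runtime counter, as the patch
	 * does at the end of its update path. */
	static void update_cfs_load_sketch(struct cfs_rq_sketch *cfs_rq)
	{
		cfs_rq->load_unacc_exec_time = 0;
	}

	/* Called from the runtime-accounting path with each executed delta. */
	static void account_exec_sketch(struct cfs_rq_sketch *cfs_rq, uint64_t delta_exec)
	{
		cfs_rq->load_unacc_exec_time += delta_exec;
		if (cfs_rq->load_unacc_exec_time > SHARES_WINDOW_NS) {
			/* Enough runtime has accumulated to wrap the averaging
			 * window: refresh the load average (and, in the real
			 * patch, the group shares) even though the task never
			 * slept. */
			update_cfs_load_sketch(cfs_rq);
		}
	}

Because the accumulator is cleared inside the load update itself (the patch zeroes load_unacc_exec_time in update_cfs_load()), an ordinary sleep-driven update also resets the demand counter, so the busy and idle paths stay consistent.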
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	9
-rw-r--r--	kernel/sched_fair.c	12
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index dadab4d13875..e914a716e1d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -353,9 +353,16 @@ struct cfs_rq {
*/
unsigned long h_load;
+ /*
+ * Maintaining per-cpu shares distribution for group scheduling
+ *
+ * load_stamp is the last time we updated the load average
+ * load_last is the last time we updated the load average and saw load
+ * load_unacc_exec_time is currently unaccounted execution time
+ */
u64 load_avg;
u64 load_period;
- u64 load_stamp, load_last;
+ u64 load_stamp, load_last, load_unacc_exec_time;
unsigned long load_contribution;
#endif
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 33f941dcf88c..e7e2f08e6d01 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -539,6 +539,9 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
+static void update_cfs_load(struct cfs_rq *cfs_rq);
+static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
+
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -558,6 +561,14 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
curr->vruntime += delta_exec_weighted;
update_min_vruntime(cfs_rq);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ cfs_rq->load_unacc_exec_time += delta_exec;
+ if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+ update_cfs_load(cfs_rq);
+ update_cfs_shares(cfs_rq, 0);
+ }
+#endif
}
static void update_curr(struct cfs_rq *cfs_rq)
@@ -713,6 +724,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq)
}
cfs_rq->load_stamp = now;
+ cfs_rq->load_unacc_exec_time = 0;
cfs_rq->load_period += delta;
if (load) {
cfs_rq->load_last = now;