author    Mike Galbraith <efault@gmx.de>  2010-03-11 17:15:38 +0100
committer Ingo Molnar <mingo@elte.hu>     2010-03-11 18:32:50 +0100
commit    b42e0c41a422a212ddea0666d5a3a0e3c35206db (patch)
tree      443cf5918548cab86c3f9f3f34a1b700d809070b /kernel/sched.c
parent    sched: Rate-limit nohz (diff)
sched: Remove avg_wakeup
Testing the load which led to this heuristic (nfs4 kbuild) shows that it
has outlived its usefulness. With intervening load balancing changes, I
cannot see any difference with/without, so recover those fastpath cycles.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301062.6785.29.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
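For context: avg_wakeup was a per-entity running average of how long a task
executes after waking another task, seeded from sysctl_sched_wakeup_granularity
in __sched_fork() (see the last hunk below) and maintained on the wakeup and
sleep fast paths via the update_avg() helper named in the first hunk header.
The following is a minimal standalone sketch, assuming the 2.6.34-era
update_avg() body (an exponentially-weighted moving average with 1/8 weight
per sample) and a stand-in granularity value; it is illustrative, not quoted
from this patch.

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch of the kernel's update_avg() helper: an EWMA that moves
	 * the average 1/8 of the way toward each new sample. */
	static void update_avg(uint64_t *avg, uint64_t sample)
	{
		int64_t diff = (int64_t)(sample - *avg);

		*avg += diff >> 3;	/* i.e. avg += (sample - avg) / 8 */
	}

	int main(void)
	{
		/* Seeded the way the removed __sched_fork() line seeded
		 * avg_wakeup; 10000000 ns is a stand-in value, not the
		 * actual sysctl_sched_wakeup_granularity default. */
		uint64_t avg_wakeup = 10000000;

		/* Feed the kind of sample the removed try_to_wake_up()
		 * hunk fed: runtime accumulated since the waker's previous
		 * wakeup. A waker that runs ~50us between wakeups pulls
		 * the average toward 50000 ns. */
		for (int i = 0; i < 32; i++)
			update_avg(&avg_wakeup, 50000);

		printf("avg_wakeup ~= %llu ns\n",
		       (unsigned long long)avg_wakeup);
		return 0;
	}

Each sample costs a subtraction, shift, and add in try_to_wake_up() and
dequeue_task(); those are the fastpath cycles the patch recovers now that,
per the commit message, the heuristic makes no measurable difference.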
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  26
1 file changed, 4 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 60b1bbe2ad1b..35a8626ace7d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1880,9 +1880,6 @@ static void update_avg(u64 *avg, u64 sample)
 static void
 enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
-	if (wakeup)
-		p->se.start_runtime = p->se.sum_exec_runtime;
-
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, wakeup, head);
 	p->se.on_rq = 1;
@@ -1890,17 +1887,11 @@ enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (sleep) {
-		if (p->se.last_wakeup) {
-			update_avg(&p->se.avg_overlap,
-				   p->se.sum_exec_runtime - p->se.last_wakeup);
-			p->se.last_wakeup = 0;
-		} else {
-			update_avg(&p->se.avg_wakeup,
-				   sysctl_sched_wakeup_granularity);
-		}
+	if (sleep && p->se.last_wakeup) {
+		update_avg(&p->se.avg_overlap,
+			   p->se.sum_exec_runtime - p->se.last_wakeup);
+		p->se.last_wakeup = 0;
 	}
-
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
@@ -2466,13 +2457,6 @@ out_activate:
 	 */
 	if (!in_interrupt()) {
 		struct sched_entity *se = &current->se;
-		u64 sample = se->sum_exec_runtime;
-
-		if (se->last_wakeup)
-			sample -= se->last_wakeup;
-		else
-			sample -= se->start_runtime;
-		update_avg(&se->avg_wakeup, sample);
 		se->last_wakeup = se->sum_exec_runtime;
 	}
 
@@ -2540,8 +2524,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.nr_migrations = 0;
 	p->se.last_wakeup = 0;
 	p->se.avg_overlap = 0;
-	p->se.start_runtime = 0;
-	p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));