 include/linux/sched.h   | 2 +-
 kernel/sched.c          | 4 ++--
 kernel/sched_fair.c     | 2 +-
 kernel/sched_idletask.c | 2 +-
 kernel/sched_rt.c       | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)
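
This patch drops the unused 'u64 now' argument from the pick_next_task() hook: the struct sched_class member loses the parameter, each class's implementation follows, and both call sites in the core picker are updated. A minimal sketch of the resulting hook shape (the dummy class and every name in it are hypothetical, not part of this patch):

static struct task_struct *pick_next_task_dummy(struct rq *rq)
{
	/*
	 * Return the best runnable task of this class, or NULL so the
	 * core picker falls through to the next class in the list.
	 */
	return NULL;
}

static struct sched_class dummy_sched_class __read_mostly = {
	.pick_next_task		= pick_next_task_dummy,
};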
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c7815a6b70e0..c6ad4071c791 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -861,7 +861,7 @@ struct sched_class {
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
 
-	struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
+	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
 
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
diff --git a/kernel/sched.c b/kernel/sched.c
index e51d75f4b4d7..b67a288a0f1f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3410,14 +3410,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, u64 now)
 	 * the fair class we can call that function directly:
 	 */
 	if (likely(rq->nr_running == rq->cfs.nr_running)) {
-		p = fair_sched_class.pick_next_task(rq, now);
+		p = fair_sched_class.pick_next_task(rq);
 		if (likely(p))
 			return p;
 	}
 
 	class = sched_class_highest;
 	for ( ; ; ) {
-		p = class->pick_next_task(rq, now);
+		p = class->pick_next_task(rq);
 		if (p)
 			return p;
 		/*
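
Put together, the hunk above leaves the core picker reading roughly as follows. This is a reconstruction from the visible context and the hunk header; comment text that lies outside the hunk is approximated, not quoted:

static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev, u64 now)
{
	struct sched_class *class;
	struct task_struct *p;

	/*
	 * Optimization: if all runnable tasks are in the fair class,
	 * call its pick_next_task() directly:
	 */
	if (likely(rq->nr_running == rq->cfs.nr_running)) {
		p = fair_sched_class.pick_next_task(rq);
		if (likely(p))
			return p;
	}

	class = sched_class_highest;
	for ( ; ; ) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
		/*
		 * The idle class always returns a task, so this loop
		 * is guaranteed to terminate:
		 */
		class = class->next;
	}
}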
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index fb4d614af2c3..0b23aaf074fa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -859,7 +859,7 @@ static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
 		__check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
 }
 
-static struct task_struct *pick_next_task_fair(struct rq *rq, u64 now)
+static struct task_struct *pick_next_task_fair(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct sched_entity *se;
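
The fair-class hunk is cut off after the two locals. For orientation, the body in this era descends the cfs_rq hierarchy; the helpers named below and their signatures are assumptions, since the rest of the function lies outside the hunk:

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		/* pick the best entity at this level, then descend into
		 * its group runqueue, if it owns one */
		se = pick_next_entity(cfs_rq);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	return task_of(se);
}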
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index f69e083e0d96..9f4c28f858fe 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -13,7 +13,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
 	resched_task(rq->idle);
 }
 
-static struct task_struct *pick_next_task_idle(struct rq *rq, u64 now)
+static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
 	schedstat_inc(rq, sched_goidle);
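
The idle picker's body is likewise truncated by the hunk; presumably it simply hands back the per-runqueue idle task, e.g.:

static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);

	/* rq->idle is always runnable, so this class never returns NULL */
	return rq->idle;
}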
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 60591e2512b1..c0b0d6237bb6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -73,7 +73,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 	resched_task(rq->curr);
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
+static struct task_struct *pick_next_task_rt(struct rq *rq)
 {
 	struct rt_prio_array *array = &rq->rt.active;
 	struct task_struct *next;
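
For completeness, after this change the RT picker plausibly scans the O(1) priority bitmap of rq->rt.active as sketched below; everything past the two declared locals is a reconstruction, not part of this diff:

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	/* find the highest-priority queue with waiters */
	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	return next;
}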