author		Steven Rostedt (VMware) <rostedt@goodmis.org>	2019-12-19 16:44:54 -0500
committer	Peter Zijlstra <peterz@infradead.org>	2020-06-25 13:45:44 +0200
commit		a87e749e8fa1aaef9b4db32e21c2795e69ce67bf (patch)
tree		967673dca41b3244195d574490bf8bd7c9cc4a70 /kernel/sched
parent		sched: Have sched_class_highest define by vmlinux.lds.h (diff)
sched: Remove struct sched_class::next field
Now that the sched_class descriptors are defined in order via the linker
script vmlinux.lds.h, there is no reason to have a "next" pointer to the
previous priority structure. The order of the structures can be treated as
an array, and used to index and find the next sched_class descriptor.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20191219214558.845353593@goodmis.org
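For illustration, a minimal sketch of the array-based walk this enables, using the section bounds and macro names introduced by the companion vmlinux.lds.h patches in this series (the names are taken from that series and are not part of this diff):

/*
 * The linker script places the sched_class descriptors back to back in
 * priority order, so the section bounds delimit an array that can be
 * walked with plain pointer arithmetic instead of a ->next pointer.
 */
extern struct sched_class __begin_sched_classes[];
extern struct sched_class __end_sched_classes[];

/* The highest-priority class is the last element of the section. */
#define sched_class_highest	(__end_sched_classes - 1)
#define sched_class_lowest	(__begin_sched_classes - 1)

#define for_class_range(class, _from, _to) \
	for (class = (_from); class != (_to); class--)

/* Iterate from the highest- to the lowest-priority sched_class. */
#define for_each_class(class) \
	for_class_range(class, sched_class_highest, sched_class_lowest)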
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/deadline.c		1
-rw-r--r--	kernel/sched/fair.c		1
-rw-r--r--	kernel/sched/idle.c		1
-rw-r--r--	kernel/sched/rt.c		1
-rw-r--r--	kernel/sched/sched.h		1
-rw-r--r--	kernel/sched/stop_task.c	1
6 files changed, 0 insertions, 6 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d9e79462993b..c9cc1d6fa363 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2481,7 +2481,6 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
const struct sched_class dl_sched_class
__attribute__((section("__dl_sched_class"))) = {
- .next = &rt_sched_class,
.enqueue_task = enqueue_task_dl,
.dequeue_task = dequeue_task_dl,
.yield_task = yield_task_dl,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3365f6b07c36..a63f400013de 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11124,7 +11124,6 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
*/
const struct sched_class fair_sched_class
__attribute__((section("__fair_sched_class"))) = {
- .next = &idle_sched_class,
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index f5806295356b..336d478bddc8 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -455,7 +455,6 @@ static void update_curr_idle(struct rq *rq)
*/
const struct sched_class idle_sched_class
__attribute__((section("__idle_sched_class"))) = {
- /* .next is NULL */
/* no enqueue/yield_task for idle tasks */
/* dequeue is not valid, we print a debug message there: */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 6543d4430331..f215eea6a966 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2431,7 +2431,6 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
const struct sched_class rt_sched_class
__attribute__((section("__rt_sched_class"))) = {
- .next = &fair_sched_class,
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4165c06d1d7b..549e7e6e0a66 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1754,7 +1754,6 @@ extern const u32 sched_prio_to_wmult[40];
#define RETRY_TASK ((void *)-1UL)
struct sched_class {
- const struct sched_class *next;
#ifdef CONFIG_UCLAMP_TASK
int uclamp_enabled;
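For comparison, the iteration that the removed ->next field used to support looked roughly like this before the series (a sketch of the prior kernel/sched/sched.h, shown only to illustrate what the array walk replaces):

/* Old scheme: classes chained together by hand via .next pointers. */
#ifdef CONFIG_SMP
#define sched_class_highest	(&stop_sched_class)
#else
#define sched_class_highest	(&dl_sched_class)
#endif

#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)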
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index f4bbd54caae0..394bc8126a1e 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -111,7 +111,6 @@ static void update_curr_stop(struct rq *rq)
*/
const struct sched_class stop_sched_class
__attribute__((section("__stop_sched_class"))) = {
- .next = &dl_sched_class,
.enqueue_task = enqueue_task_stop,
.dequeue_task = dequeue_task_stop,