From aa93cd53bc1b91b5f99c7b55e3dcc1ac98e99558 Mon Sep 17 00:00:00 2001
From: Kirill Tkhai
Date: Thu, 19 Dec 2019 16:44:55 -0500
Subject: sched: Micro optimization in pick_next_task() and in check_preempt_curr()

This introduces an optimization based on xxx_sched_class addresses in
two hot scheduler functions: pick_next_task() and check_preempt_curr().

It is possible to compare pointers to sched classes to check which of
them has the higher priority, instead of the current iteration using
for_each_class().

Another result of the patch is that the object file becomes a little
smaller (excluding the added BUG_ON(), which goes in the __init
section):

$ size kernel/sched/core.o
        text    data    bss     dec     hex   filename
before: 66446   18957   676     86079   1503f kernel/sched/core.o
after:  66398   18957   676     86031   1500f kernel/sched/core.o

Signed-off-by: Kirill Tkhai
Signed-off-by: Steven Rostedt (VMware)
Signed-off-by: Peter Zijlstra (Intel)
Link: http://lkml.kernel.org/r/711a9c4b-ff32-1136-b848-17c622d548f3@yandex.ru
---
 kernel/sched/core.c | 19 ++++-------------
 1 file changed, 4 insertions(+), 15 deletions(-)

(limited to 'kernel/sched')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 81640fe0eae8..0a08d0c235d4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1412,20 +1412,10 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 
 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
-	const struct sched_class *class;
-
-	if (p->sched_class == rq->curr->sched_class) {
+	if (p->sched_class == rq->curr->sched_class)
 		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
-	} else {
-		for_each_class(class) {
-			if (class == rq->curr->sched_class)
-				break;
-			if (class == p->sched_class) {
-				resched_curr(rq);
-				break;
-			}
-		}
-	}
+	else if (p->sched_class > rq->curr->sched_class)
+		resched_curr(rq);
 
 	/*
 	 * A queue event has occurred, and we're going to schedule.  In
@@ -4003,8 +3993,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 * higher scheduling class, because otherwise those loose the
 	 * opportunity to pull in more work from other CPUs.
	 */
-	if (likely((prev->sched_class == &idle_sched_class ||
-		    prev->sched_class == &fair_sched_class) &&
+	if (likely(prev->sched_class <= &fair_sched_class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
 
 		p = pick_next_task_fair(rq, prev, rf);
-- 
cgit v1.2.3-59-g8ed1b
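
[Editor's note] The trick above works because the sched class descriptors
are placed at consecutive, priority-ordered addresses by the linker
script, so a single pointer comparison encodes class priority. Below is a
minimal standalone user-space sketch of that idea, not kernel code; all
names in it (sketch_class, classes[], class_preempts) are invented for
illustration, with a plain array standing in for the linker-script
ordering:

	/*
	 * Sketch: descriptors laid out in one array in priority order,
	 * lowest first, so higher address == higher priority.  This
	 * mirrors the kernel's idle < fair < rt < dl < stop layout.
	 */
	#include <stdio.h>

	struct sketch_class { const char *name; };

	static const struct sketch_class classes[] = {
		{ "idle" }, { "fair" }, { "rt" }, { "dl" }, { "stop" },
	};

	static const struct sketch_class *idle_class = &classes[0];
	static const struct sketch_class *fair_class = &classes[1];
	static const struct sketch_class *rt_class   = &classes[2];

	/* One pointer comparison replaces the for_each_class() walk. */
	static int class_preempts(const struct sketch_class *p,
				  const struct sketch_class *curr)
	{
		return p > curr;
	}

	int main(void)
	{
		printf("rt preempts fair:   %d\n",
		       class_preempts(rt_class, fair_class));
		printf("idle preempts fair: %d\n",
		       class_preempts(idle_class, fair_class));
		return 0;
	}

The same ordering explains the pick_next_task() hunk: under this layout,
prev->sched_class <= &fair_sched_class is true exactly when prev is in
the idle or fair class, the two lowest addresses, so the two equality
tests collapse into one comparison.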