author     Peter Zijlstra <peterz@infradead.org>  2018-12-05 11:23:56 +0100
committer  Ingo Molnar <mingo@kernel.org>         2019-01-27 12:29:37 +0100
commit     f8a696f25ba09a1821dc6ca3db56f41c264fb896 (patch)
tree       79d77c0c14f8c7e4fe282e556359ce469507a3ad /kernel/sched
parent     sched/topology: Introduce a sysctl for Energy Aware Scheduling (diff)
sched/core: Give DCE a fighting chance
All that fancy new Energy-Aware scheduling foo is hidden behind a
static_key, which is awesome if you have the stuff enabled in your
config.

However, when you lack all the prerequisites it doesn't make any sense
to pretend we'll ever actually run this, so provide a little more clue
to the compiler so it can more aggressively delete the code.

   text    data     bss     dec     hex  filename
  50297     976      96   51369    c8a9  defconfig-build/kernel/sched/fair.o
  49227     944      96   50267    c45b  defconfig-build/kernel/sched/fair.o

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
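The idiom used by the patch: instead of callers testing the static key directly, a helper expands to static_branch_unlikely() when the prerequisites (CONFIG_ENERGY_MODEL and CONFIG_CPU_FREQ_GOV_SCHEDUTIL) are compiled in, and to a constant false otherwise, so the compiler can drop the guarded blocks entirely. A minimal stand-alone sketch of the same pattern, using a hypothetical CONFIG_FOO feature and foo_enabled() helper (not part of this patch):

#ifdef CONFIG_FOO

DECLARE_STATIC_KEY_FALSE(foo_present);		/* defined once in a .c file */

static inline bool foo_enabled(void)
{
	/* runtime-patchable branch, biased towards "disabled" */
	return static_branch_unlikely(&foo_present);
}

#else /* !CONFIG_FOO */

/* compile-time constant: callers' if (foo_enabled()) blocks become dead code */
static inline bool foo_enabled(void) { return false; }

#endif /* CONFIG_FOO */

void some_hot_path(void)
{
	if (foo_enabled()) {
		/* feature work; removed entirely by DCE when CONFIG_FOO is off */
	}
}

With the stub variant the compiler sees if (0), so the branch and everything only it references can be discarded, which is what shrinks fair.o in the size comparison above.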
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c      4
-rw-r--r--  kernel/sched/sched.h    18
-rw-r--r--  kernel/sched/topology.c  2
3 files changed, 16 insertions, 8 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 50aa2aba69bd..b1374fbddd0d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6607,7 +6607,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
- if (static_branch_unlikely(&sched_energy_present)) {
+ if (sched_energy_enabled()) {
new_cpu = find_energy_efficient_cpu(p, prev_cpu);
if (new_cpu >= 0)
return new_cpu;
@@ -8635,7 +8635,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
*/
update_sd_lb_stats(env, &sds);
- if (static_branch_unlikely(&sched_energy_present)) {
+ if (sched_energy_enabled()) {
struct root_domain *rd = env->dst_rq->rd;
if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d04530bf251f..d27c1a5d4e25 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2299,11 +2299,19 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
#endif
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+
#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
-#else
+
+DECLARE_STATIC_KEY_FALSE(sched_energy_present);
+
+static inline bool sched_energy_enabled(void)
+{
+ return static_branch_unlikely(&sched_energy_present);
+}
+
+#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
+
#define perf_domain_span(pd) NULL
-#endif
+static inline bool sched_energy_enabled(void) { return false; }
-#ifdef CONFIG_SMP
-extern struct static_key_false sched_energy_present;
-#endif
+#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 50c3fc316c54..4ae9403420ed 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -201,8 +201,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
return 1;
}
-DEFINE_STATIC_KEY_FALSE(sched_energy_present);
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+DEFINE_STATIC_KEY_FALSE(sched_energy_present);
unsigned int sysctl_sched_energy_aware = 1;
DEFINE_MUTEX(sched_energy_mutex);
bool sched_energy_update;