author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2019-05-06 10:54:27 +0200
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2019-05-06 10:54:27 +0200
commit     7d4a27c1c861f9116b7acf427e3fe3260287b876 (patch)
tree       9ac8f77023bce98b4846c9a624ff5f469d844bd2 /kernel
parent     Merge branch 'pm-x86' (diff)
parent     cpufreq: Fix kobject memleak (diff)
Merge branch 'pm-cpufreq'
* pm-cpufreq: (24 commits)
  cpufreq: Fix kobject memleak
  cpufreq: armada-37xx: fix frequency calculation for opp
  cpufreq: centrino: Fix centrino_setpolicy() kerneldoc comment
  cpufreq: qoriq: add support for lx2160a
  cpufreq: qoriq: Add ls1028a chip support
  cpufreq: Move ->get callback check outside of __cpufreq_get()
  cpufreq: Remove needless bios_limit check in show_bios_limit()
  drivers/cpufreq/acpi-cpufreq.c: This fixes the following checkpatch warning
  cpufreq: boost: Remove CONFIG_CPU_FREQ_BOOST_SW Kconfig option
  cpufreq: stats: Use lock by stat to replace global spin lock
  cpufreq: Remove cpufreq_driver check in cpufreq_boost_supported()
  cpufreq: maple: Remove redundant code from maple_cpufreq_init()
  cpufreq: ppc_cbe: fix possible object reference leak
  cpufreq: pmac32: fix possible object reference leak
  cpufreq/pasemi: fix possible object reference leak
  cpufreq: maple: fix possible object reference leak
  cpufreq: kirkwood: fix possible object reference leak
  cpufreq: imx6q: fix possible object reference leak
  cpufreq: ap806: fix possible object reference leak
  drivers/cpufreq: Convert some slow-path static_cpu_has() callers to boot_cpu_has()
  ...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 5c41ea367422..b3a878aa593d 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -13,6 +13,8 @@
#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>
+#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
+
struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int rate_limit_us;
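
For scale: SCHED_CAPACITY_SCALE is normally 1024 (1 << SCHED_CAPACITY_SHIFT), so the new fixed boost floor added above evaluates to a utilization of 128. A minimal user-space sketch of that arithmetic, separate from the kernel sources:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL			/* usual kernel value */
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

int main(void)
{
	/* 1024 / 8 = 128: the fixed utilization floor for the IO wait boost. */
	printf("IOWAIT_BOOST_MIN = %lu\n", IOWAIT_BOOST_MIN);
	return 0;
}
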
@@ -51,7 +53,6 @@ struct sugov_cpu {
u64 last_update;
unsigned long bw_dl;
- unsigned long min;
unsigned long max;
/* The field below is for single-CPU policies only: */
@@ -291,8 +292,8 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
*
* The IO wait boost of a task is disabled after a tick since the last update
* of a CPU. If a new IO wait boost is requested after more than a tick, then
- * we enable the boost starting from the minimum frequency, which improves
- * energy efficiency by ignoring sporadic wakeups from IO.
+ * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
+ * efficiency by ignoring sporadic wakeups from IO.
*/
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
bool set_iowait_boost)
@@ -303,7 +304,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
if (delta_ns <= TICK_NSEC)
return false;
- sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
+ sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
sg_cpu->iowait_boost_pending = set_iowait_boost;
return true;
@@ -317,8 +318,9 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
*
* Each time a task wakes up after an IO operation, the CPU utilization can be
* boosted to a certain utilization which doubles at each "frequent and
- * successive" wakeup from IO, ranging from the utilization of the minimum
- * OPP to the utilization of the maximum OPP.
+ * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
+ * of the maximum OPP.
+ *
* To keep doubling, an IO boost has to be requested at least once per tick,
* otherwise we restart from the utilization of the minimum OPP.
*/
@@ -349,7 +351,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
}
/* First wakeup after IO: start with minimum boost */
- sg_cpu->iowait_boost = sg_cpu->min;
+ sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
/**
@@ -389,7 +391,7 @@ static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
* No boost pending; reduce the boost value.
*/
sg_cpu->iowait_boost >>= 1;
- if (sg_cpu->iowait_boost < sg_cpu->min) {
+ if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
sg_cpu->iowait_boost = 0;
return util;
}
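
Taken together, the hunks above mean the boost now starts at IOWAIT_BOOST_MIN, doubles on each sufficiently frequent IO wakeup up to SCHED_CAPACITY_SCALE, and is halved on every update without a pending request until it falls below the floor and is cleared. A rough user-space sketch of that lifecycle, assuming SCHED_CAPACITY_SCALE is 1024; the names mirror the kernel code but this is only an illustration, not the kernel implementation:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

int main(void)
{
	unsigned long boost = IOWAIT_BOOST_MIN;

	/* Frequent IO wakeups: boost doubles each time, capped at the maximum. */
	printf("grow:");
	while (boost < SCHED_CAPACITY_SCALE) {
		printf(" %lu", boost);
		boost = boost * 2 < SCHED_CAPACITY_SCALE ?
			boost * 2 : SCHED_CAPACITY_SCALE;
	}
	printf(" %lu\n", boost);		/* grow: 128 256 512 1024 */

	/* No pending boost: halve until it drops below the floor, then clear. */
	printf("decay:");
	while (boost) {
		boost >>= 1;
		if (boost < IOWAIT_BOOST_MIN)
			boost = 0;
		printf(" %lu", boost);
	}
	printf("\n");				/* decay: 512 256 128 0 */
	return 0;
}
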
@@ -826,9 +828,6 @@ static int sugov_start(struct cpufreq_policy *policy)
memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->cpu = cpu;
sg_cpu->sg_policy = sg_policy;
- sg_cpu->min =
- (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
- policy->cpuinfo.max_freq;
}
for_each_cpu(cpu, policy->cpus) {
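
The lines removed from sugov_start() used to scale the boost floor by the policy's frequency range. For a hypothetical policy with an invented 400 MHz minimum and 2 GHz maximum, the old per-policy floor and the new fixed one compare as follows (illustration only, not kernel code):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

int main(void)
{
	unsigned long min_freq = 400000;	/* kHz, made-up example policy */
	unsigned long max_freq = 2000000;	/* kHz, made-up example policy */

	/* The per-policy formula this series removes from sugov_start(). */
	unsigned long old_min = (SCHED_CAPACITY_SCALE * min_freq) / max_freq;

	printf("old per-policy min: %lu, new fixed min: %lu\n",
	       old_min, IOWAIT_BOOST_MIN);	/* 204 vs 128 */
	return 0;
}
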