author:    Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-02-15 02:20:51 +0100
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-03-09 14:41:04 +0100
commit:    57dc3bcd454eb420ddf25d89852993b61b351327 (patch)
tree:      c3ecd7d56d9537f55a294b30672a9e55f8df74d0 /drivers/cpufreq/cpufreq_ondemand.c
parent:    cpufreq: governor: Reset sample delay in store_sampling_rate() (diff)
cpufreq: governor: Move rate_mult to struct policy_dbs
The rate_mult field in struct od_cpu_dbs_info_s is used by the code shared with
the conservative governor, and to access it that code has to do an ugly governor
type check.

However, first of all, it is only ever used for policy->cpu, so it is per-policy
rather than per-CPU, and second, it is initialized to 1 by
cpufreq_governor_start(), so if the conservative governor never modifies it, it
will have no effect on the results of any computations.

For these reasons, move rate_mult to struct policy_dbs_info (as a common field).

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
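The struct definitions themselves live in drivers/cpufreq/cpufreq_governor.h,
which is outside this diffstat, so here is only a minimal sketch of the field
move described above. Apart from rate_mult, the struct names, and the members
already visible in the diff (timer_mutex, the policy_dbs_list linkage), the
surrounding fields are illustrative placeholders rather than the actual header
contents.

/*
 * Sketch only -- not the cpufreq_governor.h hunk of this patch.
 * Before: rate_mult was an ondemand-only, per-CPU field.
 */
struct od_cpu_dbs_info_s {
	struct cpu_dbs_info cdbs;
	/* ... other ondemand-specific fields ... */
	unsigned int rate_mult;		/* removed by this patch */
};

/* After: rate_mult is a common, per-policy field. */
struct policy_dbs_info {
	struct cpufreq_policy *policy;
	struct mutex timer_mutex;	/* taken around the rate_mult reset below */
	struct list_head list;		/* entry in dbs_data->policy_dbs_list */
	/* ... */
	unsigned int rate_mult;		/* sampling-rate multiplier, now shared */
};

With the field in the per-policy structure, the code shared with the
conservative governor can read policy_dbs->rate_mult directly (it is
initialized to 1 by cpufreq_governor_start()) instead of type-checking the
governor and reaching into struct od_cpu_dbs_info_s.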
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 707c017f4e67..812d9949a0c4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -164,7 +164,7 @@ static void od_update(struct cpufreq_policy *policy)
 	if (load > dbs_data->up_threshold) {
 		/* If switching to max speed, apply sampling_down_factor */
 		if (policy->cur < policy->max)
-			dbs_info->rate_mult = dbs_data->sampling_down_factor;
+			policy_dbs->rate_mult = dbs_data->sampling_down_factor;
 		dbs_freq_increase(policy, policy->max);
 	} else {
 		/* Calculate the next frequency proportional to load */
@@ -175,7 +175,7 @@ static void od_update(struct cpufreq_policy *policy)
 		freq_next = min_f + load * (max_f - min_f) / 100;
 
 		/* No longer fully busy, reset rate_mult */
-		dbs_info->rate_mult = 1;
+		policy_dbs->rate_mult = 1;
 
 		if (!od_tuners->powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
@@ -214,7 +214,7 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
 			delay = dbs_info->freq_hi_jiffies;
 		} else {
 			delay = delay_for_sampling_rate(dbs_data->sampling_rate
-					* dbs_info->rate_mult);
+					* policy_dbs->rate_mult);
 		}
 	}
 
@@ -266,20 +266,27 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
 static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
 		const char *buf, size_t count)
 {
-	unsigned int input, j;
+	struct policy_dbs_info *policy_dbs;
+	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
+
 	dbs_data->sampling_down_factor = input;
 
 	/* Reset down sampling multiplier in case it was active */
-	for_each_online_cpu(j) {
-		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
-				j);
-		dbs_info->rate_mult = 1;
+	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+		/*
+		 * Doing this without locking might lead to using different
+		 * rate_mult values in od_update() and od_dbs_timer().
+		 */
+		mutex_lock(&policy_dbs->timer_mutex);
+		policy_dbs->rate_mult = 1;
+		mutex_unlock(&policy_dbs->timer_mutex);
 	}
+
 	return count;
 }
 