author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2020-10-06 21:43:43 +0200
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2020-10-07 17:13:15 +0200
commit     efad4240da949fc3249015065b95d708b72ae670 (patch)
tree       f9879c25cda6bd6fddae7cfe644b0a55c619568c /drivers/cpufreq/cpufreq_stats.c
parent     cpufreq: schedutil: Simplify sugov_fast_switch() (diff)
cpufreq: stats: Add memory barrier to store_reset()
There is nothing to prevent the CPU or the compiler from reordering the
writes to stats->reset_time and stats->reset_pending in store_reset(),
in which case the readers of stats->reset_time may see a stale value.
Moreover, on 32-bit arches the write to reset_time cannot be completed
in one go, so the readers of it may see a partially updated value in
that case.

To prevent that from happening, add a write memory barrier between the
writes to stats->reset_time and stats->reset_pending in store_reset(),
and a corresponding read memory barrier in each reader of
stats->reset_time.

Fixes: 40c3bd4cfa6f ("cpufreq: stats: Defer stats update to cpufreq_stats_record_transition()")
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
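For context, the fix follows the standard publish-then-flag pairing: the writer
stores the payload (reset_time), issues smp_wmb(), and only then sets the flag
(reset_pending); each reader checks the flag, issues smp_rmb(), and only then
loads the payload. The sketch below is a hypothetical userspace C11 analogue of
that pairing, not the kernel code: atomic_thread_fence() stands in for
smp_wmb()/smp_rmb(), and the names payload, flag, writer and reader are
invented for illustration.

/*
 * Hypothetical userspace analogue of the smp_wmb()/smp_rmb() pairing added by
 * this patch.  payload stands in for stats->reset_time, flag for
 * stats->reset_pending; neither name appears in the kernel source.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t payload;	/* data published by the writer */
static atomic_int flag;		/* tells readers the data is ready */

static void *writer(void *arg)
{
	(void)arg;
	payload = 0xdeadbeefcafeULL;			/* 1. publish the data */
	atomic_thread_fence(memory_order_release);	/* 2. ~ smp_wmb() in store_reset() */
	atomic_store_explicit(&flag, 1, memory_order_relaxed);	/* 3. set the flag */
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&flag, memory_order_relaxed))
		;					/* 1. wait for the flag */
	atomic_thread_fence(memory_order_acquire);	/* 2. ~ smp_rmb() in the readers */
	printf("payload = %llx\n",			/* 3. now safe to read the data */
	       (unsigned long long)payload);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Build with something like "cc -std=c11 -pthread". Without the two fences the
reader could observe flag == 1 yet still use a stale (or, on a 32-bit machine,
half-written) payload, which is exactly the window the patch closes for
stats->reset_time.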
Diffstat (limited to 'drivers/cpufreq/cpufreq_stats.c')
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 8e7d64f34041..1b1389face85 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -47,6 +47,11 @@ static void cpufreq_stats_reset_table(struct cpufreq_stats *stats)
 	/* Adjust for the time elapsed since reset was requested */
 	WRITE_ONCE(stats->reset_pending, 0);
+	/*
+	 * Prevent the reset_time read from being reordered before the
+	 * reset_pending accesses in cpufreq_stats_record_transition().
+	 */
+	smp_rmb();
 	cpufreq_stats_update(stats, READ_ONCE(stats->reset_time));
 }
@@ -71,10 +76,16 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 	for (i = 0; i < stats->state_num; i++) {
 		if (pending) {
-			if (i == stats->last_index)
+			if (i == stats->last_index) {
+				/*
+				 * Prevent the reset_time read from occurring
+				 * before the reset_pending read above.
+				 */
+				smp_rmb();
 				time = get_jiffies_64() - READ_ONCE(stats->reset_time);
-			else
+			} else {
 				time = 0;
+			}
 		} else {
 			time = stats->time_in_state[i];
 			if (i == stats->last_index)
@@ -99,6 +110,11 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
 	 * avoid races.
 	 */
 	WRITE_ONCE(stats->reset_time, get_jiffies_64());
+	/*
+	 * The memory barrier below is to prevent the readers of reset_time from
+	 * seeing a stale or partially updated value.
+	 */
+	smp_wmb();
 	WRITE_ONCE(stats->reset_pending, 1);
 	return count;