author:    Peter Zijlstra <peterz@infradead.org>    2020-07-16 13:11:24 +0800
committer: Will Deacon <will@kernel.org>            2020-07-20 11:50:47 +0100
commit:    1b86abc1c645ad5c9c7bf70910cb3ce73939d2d7
tree:      8f72d9b938e23f26de7f0be72a0e11efc77c8b1c
parent:    arm64: perf: Correct the event index in sysfs
sched_clock: Expose struct clock_read_data
In order to support perf_event_mmap_page::cap_time features, an
architecture needs, aside from a userspace-readable counter register, to
expose the exact clock data so that userspace can convert the counter
register into a correct timestamp.

Provide struct clock_read_data and two (seqcount) helpers so that
architectures (arm64 in particular) can expose the numbers to userspace.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Link: https://lore.kernel.org/r/20200716051130.4359-2-leo.yan@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
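For context, a later patch in this series consumes these helpers from the
arm64 perf code. Below is a minimal, illustrative sketch (not the exact
arm64 implementation) of how an architecture could publish the clock
parameters to userspace through the standard perf_event_mmap_page fields;
the function name example_update_userpage is hypothetical:

	/*
	 * Illustrative sketch: copy the sched_clock() parameters into
	 * perf_event_mmap_page so userspace can turn raw counter reads
	 * into timestamps. Retry until the seqcount is stable.
	 */
	static void example_update_userpage(struct perf_event_mmap_page *userpg)
	{
		struct clock_read_data *rd;
		unsigned int seq;

		do {
			rd = sched_clock_read_begin(&seq);

			userpg->time_mult  = rd->mult;
			userpg->time_shift = rd->shift;
			/* ns value corresponding to counter value zero */
			userpg->time_zero  = rd->epoch_ns -
				((rd->epoch_cyc * rd->mult) >> rd->shift);
		} while (sched_clock_read_retry(seq));

		userpg->cap_user_time = 1;
	}

The (*seq & 1) indexing in sched_clock_read_begin() selects between the two
clock_read_data copies that the update path maintains: an odd sequence count
means an update is in flight, so readers use the second copy while the first
is being rewritten.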
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/sched_clock.c	41
1 file changed, 13 insertions(+), 28 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index fa3f800d7d76..0acaadc3156c 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -20,31 +20,6 @@
#include "timekeeping.h"
/**
- * struct clock_read_data - data required to read from sched_clock()
- *
- * @epoch_ns: sched_clock() value at last update
- * @epoch_cyc: Clock cycle value at last update.
- * @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit
- * clocks.
- * @read_sched_clock: Current clock source (or dummy source when suspended).
- * @mult: Multiplier for scaled math conversion.
- * @shift: Shift value for scaled math conversion.
- *
- * Care must be taken when updating this structure; it is read by
- * some very hot code paths. It occupies <=40 bytes and, when combined
- * with the seqcount used to synchronize access, comfortably fits into
- * a 64 byte cache line.
- */
-struct clock_read_data {
- u64 epoch_ns;
- u64 epoch_cyc;
- u64 sched_clock_mask;
- u64 (*read_sched_clock)(void);
- u32 mult;
- u32 shift;
-};
-
-/**
* struct clock_data - all data needed for sched_clock() (including
* registration of a new clock source)
*
@@ -93,6 +68,17 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
return (cyc * mult) >> shift;
}
+struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
+{
+ *seq = raw_read_seqcount(&cd.seq);
+ return cd.read_data + (*seq & 1);
+}
+
+int sched_clock_read_retry(unsigned int seq)
+{
+ return read_seqcount_retry(&cd.seq, seq);
+}
+
unsigned long long notrace sched_clock(void)
{
u64 cyc, res;
@@ -100,13 +86,12 @@ unsigned long long notrace sched_clock(void)
struct clock_read_data *rd;
do {
- seq = raw_read_seqcount(&cd.seq);
- rd = cd.read_data + (seq & 1);
+ rd = sched_clock_read_begin(&seq);
cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
rd->sched_clock_mask;
res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
- } while (read_seqcount_retry(&cd.seq, seq));
+ } while (sched_clock_read_retry(seq));
return res;
}
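On the userspace side, the conversion mirrors cyc_to_ns(): multiply the
counter value by time_mult and shift right by time_shift. A sketch, assuming
the parameters were read consistently from the mmap page (read_counter()
stands in for the architecture-specific userspace counter read, e.g.
CNTVCT_EL0 on arm64):

	#include <stdint.h>

	/* Hypothetical: architecture-specific userspace counter read. */
	extern uint64_t read_counter(void);

	static uint64_t counter_to_ns(uint32_t time_mult, uint16_t time_shift,
				      uint64_t time_zero)
	{
		uint64_t cyc = read_counter();

		/* Same scaled math as the kernel's cyc_to_ns(). */
		return time_zero + ((cyc * time_mult) >> time_shift);
	}

In practice userspace must also sample perf_event_mmap_page::lock before and
after reading the time_* fields and retry on a mismatch, mirroring the
kernel-side seqcount loop above.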