author     Marco Elver <elver@google.com>          2024-11-04 16:43:06 +0100
committer  Peter Zijlstra <peterz@infradead.org>   2024-11-05 12:55:35 +0100
commit     8ab40fc2b9086b915e46890bb9252dc7692f1da0 (patch)
tree       8e682c5a7b473c8f54e5b313c5c237db0f875957
parent     time/sched_clock: Swap update_clock_read_data() latch writes (diff)
time/sched_clock: Broaden sched_clock()'s instrumentation coverage
Most of sched_clock()'s implementation is ineligible for instrumentation
due to relying on sched_clock_noinstr().

Split the implementation off into an __always_inline function
__sched_clock(), which is then used by the noinstr and instrumentable
versions, to allow more of sched_clock() to be covered by various
instrumentation.

This will allow instrumentation with the various sanitizers (KASAN,
KCSAN, KMSAN, UBSAN).

For KCSAN, we know that raw seqcount_latch usage without annotations
will result in false positive reports: tell it that all of
__sched_clock() is "atomic" for the latch reader; later changes in this
series will take care of the writers.

Co-developed-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Signed-off-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20241104161910.780003-3-elver@google.com
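Note: the second hunk below elides the body of __sched_clock(). For context,
this is roughly the (unchanged) latch-reader loop as it stands in
kernel/time/sched_clock.c at this commit; the field and helper names
(clock_read_data, cyc_to_ns(), the raw seqcount_latch helpers) come from that
file, and the sketch is illustrative, not part of this patch:

static __always_inline unsigned long long __sched_clock(void)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 cyc, res;

	do {
		/* Low bit of seq selects which of the two latched copies to read. */
		seq = raw_read_seqcount_latch(&cd.seq);
		rd = cd.read_data + (seq & 1);

		/* Convert elapsed cycles since the epoch into nanoseconds. */
		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (raw_read_seqcount_latch_retry(&cd.seq, seq));

	return res;
}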
 kernel/time/sched_clock.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 85595fcf6aa2..29bdf309dae8 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -80,7 +80,7 @@ notrace int sched_clock_read_retry(unsigned int seq)
 	return raw_read_seqcount_latch_retry(&cd.seq, seq);
 }
 
-unsigned long long noinstr sched_clock_noinstr(void)
+static __always_inline unsigned long long __sched_clock(void)
 {
 	struct clock_read_data *rd;
 	unsigned int seq;
@@ -98,11 +98,23 @@ unsigned long long noinstr sched_clock_noinstr(void)
 	return res;
 }
 
+unsigned long long noinstr sched_clock_noinstr(void)
+{
+	return __sched_clock();
+}
+
 unsigned long long notrace sched_clock(void)
 {
 	unsigned long long ns;
 	preempt_disable_notrace();
-	ns = sched_clock_noinstr();
+	/*
+	 * All of __sched_clock() is a seqcount_latch reader critical section,
+	 * but relies on the raw helpers which are uninstrumented. For KCSAN,
+	 * mark all accesses in __sched_clock() as atomic.
+	 */
+	kcsan_nestable_atomic_begin();
+	ns = __sched_clock();
+	kcsan_nestable_atomic_end();
 	preempt_enable_notrace();
 	return ns;
 }
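A minimal sketch of the KCSAN annotation pattern used above, on a
hypothetical racy reader: sample_read() and its argument are invented for
illustration, while kcsan_nestable_atomic_begin()/kcsan_nestable_atomic_end()
are the real <linux/kcsan-checks.h> interfaces. Plain accesses between the
begin/end pair are treated as atomic, so an intentional data race in the
region is not reported:

#include <linux/types.h>
#include <linux/kcsan-checks.h>

/*
 * Hypothetical reader: *v is written concurrently elsewhere and the
 * plain load below is intentional. Marking the region nestable-atomic
 * tells KCSAN not to flag the data race as a false positive.
 */
static u64 sample_read(const u64 *v)
{
	u64 val;

	kcsan_nestable_atomic_begin();
	val = *v;	/* plain access; KCSAN treats it as atomic here */
	kcsan_nestable_atomic_end();

	return val;
}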