author     Will Deacon <will@kernel.org>	2021-03-18 17:07:37 +0000
committer  Catalin Marinas <catalin.marinas@arm.com>	2021-03-24 16:48:41 +0000
commit     77ec462536a13d4b428a1eead725c4818a49f0b1 (patch)
tree       85563cf866233aaa24c0ea0961da46c17362e724 /arch/arm64/include/asm/barrier.h
parent     arm64: compat: Allow signal page to be remapped (diff)
arm64: vdso: Avoid ISB after reading from cntvct_el0
We can avoid the expensive ISB instruction after reading the counter in
the vDSO gettime functions by creating a fake address hazard against a
dummy stack read, just like we do inside the kernel.

Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Link: https://lore.kernel.org/r/20210318170738.7756-5-will@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/include/asm/barrier.h')
-rw-r--r--  arch/arm64/include/asm/barrier.h | 19
1 file changed, 19 insertions(+), 0 deletions(-)
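
For context, a minimal sketch (an approximation for illustration, not the pre-patch source verbatim) of the kind of counter read the vDSO gettime path performed before this change, with the trailing ISB that the patch removes:

	/* Hedged sketch: kernel-style code, assumes u64, __always_inline and isb() from kernel headers. */
	static __always_inline u64 read_counter_with_trailing_isb(void)
	{
		u64 res;

		isb();		/* don't let the counter read be speculated early */
		asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
		isb();		/* expensive: keeps later loads (the seqlock re-read) behind the counter read */
		return res;
	}

The new macro replaces that second ISB with a fake address hazard: EOR-ing the counter value with itself yields zero but preserves a register dependency on it, adding that zero to SP produces a valid stack address, and the dummy LDR from that address turns the dependency into an address hazard, so ordinary memory barriers and the sequence-counter re-read now order the counter read like any other load.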
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index c3009b0e5239..37d891af8ea5 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -70,6 +70,25 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
return mask;
}
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do { \
+ u64 tmp, _val = (val); \
+ \
+ asm volatile( \
+ " eor %0, %1, %1\n" \
+ " add %0, sp, %0\n" \
+ " ldr xzr, [%0]" \
+ : "=r" (tmp) : "r" (_val)); \
+} while (0)
+
#define __smp_mb() dmb(ish)
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
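
A minimal usage sketch of how a vDSO clock read would pair the counter access with the new macro; the helper name read_cntvct_ordered() is illustrative, not taken from the patch:

	/* Hedged sketch: assumes the macro above plus u64, __always_inline and isb() from kernel headers. */
	static __always_inline u64 read_cntvct_ordered(void)
	{
		u64 cnt;

		isb();					/* still needed: prevents an early, speculated read */
		asm volatile("mrs %0, cntvct_el0" : "=r" (cnt));
		arch_counter_enforce_ordering(cnt);	/* replaces the trailing isb() */
		return cnt;
	}

The dummy stack load is much cheaper than an ISB, which must flush the pipeline as a context synchronization event.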