Diffstat (limited to 'include/asm-generic/barrier.h')
 -rw-r--r--   include/asm-generic/barrier.h | 117
 1 file changed, 78 insertions(+), 39 deletions(-)
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 85b28eb80b11..961f4d88f9ef 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -14,12 +14,43 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
+#include <asm/rwonce.h>
 
 #ifndef nop
 #define nop()	asm volatile ("nop")
 #endif
 
 /*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb()	do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_mb
+#define dma_mb()	do { kcsan_mb(); __dma_mb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb()	do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb()	do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
+/*
  * Force strict CPU ordering. And yes, this is required on UP too when we're
  * talking to devices.
  *
@@ -38,6 +69,10 @@
 #define wmb()	mb()
 #endif
 
+#ifndef dma_mb
+#define dma_mb()	mb()
+#endif
+
 #ifndef dma_rmb
 #define dma_rmb()	rmb()
 #endif
@@ -46,10 +81,6 @@
 #define dma_wmb()	wmb()
 #endif
 
-#ifndef read_barrier_depends
-#define read_barrier_depends()	do { } while (0)
-#endif
-
 #ifndef __smp_mb
 #define __smp_mb()	mb()
 #endif
@@ -62,26 +93,18 @@
 #define __smp_wmb()	wmb()
 #endif
 
-#ifndef __smp_read_barrier_depends
-#define __smp_read_barrier_depends()	read_barrier_depends()
-#endif
-
 #ifdef CONFIG_SMP
 
 #ifndef smp_mb
-#define smp_mb()	__smp_mb()
+#define smp_mb()	do { kcsan_mb(); __smp_mb(); } while (0)
 #endif
 
 #ifndef smp_rmb
-#define smp_rmb()	__smp_rmb()
+#define smp_rmb()	do { kcsan_rmb(); __smp_rmb(); } while (0)
 #endif
 
 #ifndef smp_wmb
-#define smp_wmb()	__smp_wmb()
-#endif
-
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends()	__smp_read_barrier_depends()
+#define smp_wmb()	do { kcsan_wmb(); __smp_wmb(); } while (0)
 #endif
 
 #else	/* !CONFIG_SMP */
@@ -98,10 +121,6 @@
 #define smp_wmb()	barrier()
 #endif
 
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends()	do { } while (0)
-#endif
-
 #endif	/* CONFIG_SMP */
 
 #ifndef __smp_store_mb
@@ -128,29 +147,29 @@ do {							\
 #ifndef __smp_load_acquire
 #define __smp_load_acquire(p)					\
 ({								\
-	typeof(*p) ___p1 = READ_ONCE(*p);			\
+	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);	\
 	compiletime_assert_atomic_type(*p);			\
 	__smp_mb();						\
-	___p1;							\
+	(typeof(*p))___p1;					\
 })
 #endif
 
 #ifdef CONFIG_SMP
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)  __smp_store_mb(var, value)
+#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic()	__smp_mb__before_atomic()
+#define smp_mb__before_atomic()	do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
 #endif
 
 #ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic()	__smp_mb__after_atomic()
+#define smp_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
 #endif
 
 #ifndef smp_store_release
-#define smp_store_release(p, v) __smp_store_release(p, v)
+#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
 #endif
 
 #ifndef smp_load_acquire
@@ -183,24 +202,23 @@ do {							\
 #ifndef smp_load_acquire
 #define smp_load_acquire(p)					\
 ({								\
-	typeof(*p) ___p1 = READ_ONCE(*p);			\
+	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);	\
 	compiletime_assert_atomic_type(*p);			\
 	barrier();						\
-	___p1;							\
+	(typeof(*p))___p1;					\
 })
 #endif
 
 #endif	/* CONFIG_SMP */
 
 /* Barriers for virtual machine guests when talking to an SMP host */
-#define virt_mb() __smp_mb()
-#define virt_rmb() __smp_rmb()
-#define virt_wmb() __smp_wmb()
-#define virt_read_barrier_depends() __smp_read_barrier_depends()
-#define virt_store_mb(var, value) __smp_store_mb(var, value)
-#define virt_mb__before_atomic() __smp_mb__before_atomic()
-#define virt_mb__after_atomic() __smp_mb__after_atomic()
-#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
 #define virt_load_acquire(p) __smp_load_acquire(p)
 
 /**
@@ -229,14 +247,14 @@ do {							\
 #ifndef smp_cond_load_relaxed
 #define smp_cond_load_relaxed(ptr, cond_expr) ({		\
 	typeof(ptr) __PTR = (ptr);				\
-	typeof(*ptr) VAL;					\
+	__unqual_scalar_typeof(*ptr) VAL;			\
 	for (;;) {						\
 		VAL = READ_ONCE(*__PTR);			\
 		if (cond_expr)					\
 			break;					\
 		cpu_relax();					\
 	}							\
-	VAL;							\
+	(typeof(*ptr))VAL;					\
 })
 #endif
 
@@ -250,12 +268,33 @@ do {							\
  */
 #ifndef smp_cond_load_acquire
 #define smp_cond_load_acquire(ptr, cond_expr) ({		\
-	typeof(*ptr) _val;					\
+	__unqual_scalar_typeof(*ptr) _val;			\
 	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
 	smp_acquire__after_ctrl_dep();				\
-	_val;							\
+	(typeof(*ptr))_val;					\
 })
 #endif
 
+/*
+ * pmem_wmb() ensures that all stores for which the modification
+ * are written to persistent storage by preceding instructions have
+ * updated persistent storage before any data access or data transfer
+ * caused by subsequent instructions is initiated.
+ */
+#ifndef pmem_wmb
+#define pmem_wmb()	wmb()
+#endif
+
+/*
+ * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
+ * this kind of memory accesses, the CPU may wait for prior accesses to be
+ * merged with subsequent ones. In some situation, such wait is bad for the
+ * performance. io_stop_wc() can be used to prevent the merging of
+ * write-combining memory accesses before this macro with those after it.
+ */
+#ifndef io_stop_wc
+#define io_stop_wc() do { } while (0)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_GENERIC_BARRIER_H */
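
Note on the new opt-in scheme: an architecture picks up the KCSAN-instrumented wrappers above simply by defining the raw, double-underscore-prefixed barriers before including this header. Below is a minimal sketch of what such an arch header could look like; the "foo" architecture, the header guard, and the dmb mnemonics are placeholders for illustration, not taken from any real port.

/* Hypothetical arch/foo/include/asm/barrier.h -- illustrative only. */
#ifndef __ASM_FOO_BARRIER_H
#define __ASM_FOO_BARRIER_H

/*
 * Provide only the raw, uninstrumented barriers; asm-generic/barrier.h
 * then defines mb()/rmb()/wmb() as kcsan_*() + __*() pairs, and every
 * barrier left undefined falls back to the generic defaults.
 */
#define __mb()	asm volatile("dmb sy" ::: "memory")	/* placeholder mnemonic */
#define __rmb()	asm volatile("dmb ld" ::: "memory")	/* placeholder mnemonic */
#define __wmb()	asm volatile("dmb st" ::: "memory")	/* placeholder mnemonic */

#include <asm-generic/barrier.h>

#endif /* __ASM_FOO_BARRIER_H */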
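
The __unqual_scalar_typeof() conversions keep smp_load_acquire() and friends usable on volatile-qualified scalars without the local copy inheriting the qualifier; the usage pattern itself is unchanged. A minimal sketch of the usual store-release/load-acquire pairing, with hypothetical variable and function names:

#include <asm/barrier.h>

static int payload;
static int ready;

/* Writer: publish the data, then set the flag with release semantics. */
static void publish(void)
{
	payload = 42;
	smp_store_release(&ready, 1);	/* orders the payload store before the flag store */
}

/* Reader: check the flag with acquire semantics before touching the data. */
static int consume(void)
{
	if (smp_load_acquire(&ready))	/* orders the flag load before the payload load */
		return payload;
	return -1;			/* not published yet */
}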
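
Similarly, smp_cond_load_acquire(), whose internals change to __unqual_scalar_typeof() here, spins with cpu_relax() until a condition on the freshly reloaded value holds and then provides acquire ordering via smp_acquire__after_ctrl_dep(). A hypothetical wait-for-nonzero helper, assuming a shared variable named state:

#include <asm/barrier.h>

static unsigned long state;

/* Busy-wait until another CPU stores a nonzero value to state. */
static unsigned long wait_for_state(void)
{
	/* VAL names the reloaded value inside the macro's condition expression. */
	return smp_cond_load_acquire(&state, VAL != 0);
}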