Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/barrier.h                     | 65
-rw-r--r--  include/asm-generic/bitops/instrumented-atomic.h  |  3
-rw-r--r--  include/asm-generic/bitops/instrumented-lock.h    |  3
-rw-r--r--  include/asm-generic/cacheflush.h                  |  6
-rw-r--r--  include/asm-generic/error-injection.h             |  4
-rw-r--r--  include/asm-generic/logic_io.h                    |  2
-rw-r--r--  include/asm-generic/sections.h                    | 24
7 files changed, 77 insertions(+), 30 deletions(-)
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 640f09479bdf..3d503e74037f 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -14,6 +14,7 @@
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>
#ifndef nop
@@ -21,6 +22,31 @@
#endif
/*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb() do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
+/*
* Force strict CPU ordering. And yes, this is required on UP too when we're
* talking to devices.
*
@@ -62,15 +88,15 @@
#ifdef CONFIG_SMP
#ifndef smp_mb
-#define smp_mb() __smp_mb()
+#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#endif
#ifndef smp_rmb
-#define smp_rmb() __smp_rmb()
+#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif
#ifndef smp_wmb
-#define smp_wmb() __smp_wmb()
+#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif
#else /* !CONFIG_SMP */
@@ -123,19 +149,19 @@ do { \
#ifdef CONFIG_SMP
#ifndef smp_store_mb
-#define smp_store_mb(var, value) __smp_store_mb(var, value)
+#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif
#ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic() __smp_mb__before_atomic()
+#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif
#ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic() __smp_mb__after_atomic()
+#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif
#ifndef smp_store_release
-#define smp_store_release(p, v) __smp_store_release(p, v)
+#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif
#ifndef smp_load_acquire
@@ -178,13 +204,13 @@ do { \
#endif /* CONFIG_SMP */
/* Barriers for virtual machine guests when talking to an SMP host */
-#define virt_mb() __smp_mb()
-#define virt_rmb() __smp_rmb()
-#define virt_wmb() __smp_wmb()
-#define virt_store_mb(var, value) __smp_store_mb(var, value)
-#define virt_mb__before_atomic() __smp_mb__before_atomic()
-#define virt_mb__after_atomic() __smp_mb__after_atomic()
-#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)
/**
@@ -251,5 +277,16 @@ do { \
#define pmem_wmb() wmb()
#endif
+/*
+ * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
+ * this kind of memory access, the CPU may wait for prior accesses to be
+ * merged with subsequent ones, which in some situations hurts performance.
+ * io_stop_wc() can be used to prevent the merging of write-combining memory
+ * accesses before this macro with those after it.
+ */
+#ifndef io_stop_wc
+#define io_stop_wc() do { } while (0)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
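
For illustration, here is a minimal sketch of how an architecture opts into the instrumented barriers above. The arch name, header guard, and fence instruction are hypothetical; only the __-prefixed hook names and the wrapper shape come from this header, and the kcsan_*() calls compile away when KCSAN is disabled.

    /* arch/foo/include/asm/barrier.h (hypothetical sketch) */
    #ifndef __ASM_FOO_BARRIER_H
    #define __ASM_FOO_BARRIER_H

    /* Define only the __ prefixed variants with the raw fence insns... */
    #define __mb()	__asm__ __volatile__("fence" ::: "memory")
    #define __rmb()	__asm__ __volatile__("fence" ::: "memory")
    #define __wmb()	__asm__ __volatile__("fence" ::: "memory")

    /*
     * ...and asm-generic/barrier.h then emits the instrumented wrappers,
     * e.g. #define mb() do { kcsan_mb(); __mb(); } while (0), so KCSAN
     * models the ordering these barriers provide.
     */
    #include <asm-generic/barrier.h>

    #endif /* __ASM_FOO_BARRIER_H */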
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 81915dcd4b4e..c90192b1c755 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -67,6 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
+ kcsan_mb();
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -80,6 +81,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
+ kcsan_mb();
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -93,6 +95,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
+ kcsan_mb();
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
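
The kcsan_mb() calls tell KCSAN that these value-returning bitops imply full memory barriers, so its weak-memory modeling treats surrounding plain accesses as ordered. A hedged sketch of the idiom this covers; the variable and function names are hypothetical:

    static unsigned long flag;
    static int payload;

    void producer(void)
    {
    	payload = 42;			/* plain write, ordered before... */
    	test_and_set_bit(0, &flag);	/* ...the RMW's implied full barrier */
    }

    void consumer(void)
    {
    	if (test_and_clear_bit(0, &flag))	/* full barrier orders... */
    		WARN_ON(payload != 42);		/* ...this plain read */
    }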
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index 75ef606f7145..eb64bd4f11f3 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -22,6 +22,7 @@
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
+ kcsan_release();
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
@@ -37,6 +38,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
+ kcsan_release();
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
@@ -71,6 +73,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
static inline bool
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
+ kcsan_release();
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_clear_bit_unlock_is_negative_byte(nr, addr);
}
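
Here kcsan_release() marks the unlock-style bitops as release operations, pairing with the acquire semantics of test_and_set_bit_lock(). A hedged sketch of that pairing as a minimal bit spinlock; the helper names are hypothetical:

    static unsigned long lock_word;

    static void foo_lock(void)
    {
    	/* Acquire semantics: spin until bit 0 is won. */
    	while (test_and_set_bit_lock(0, &lock_word))
    		cpu_relax();
    }

    static void foo_unlock(void)
    {
    	/* Release semantics, now modeled by KCSAN via kcsan_release(). */
    	clear_bit_unlock(0, &lock_word);
    }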
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index fedc0dfa4877..4f07afacbc23 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -50,13 +50,7 @@ static inline void flush_dcache_page(struct page *page)
{
}
-static inline void flush_dcache_folio(struct folio *folio) { }
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-#endif
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-void flush_dcache_folio(struct folio *folio);
#endif
#ifndef flush_dcache_mmap_lock
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 7ddd9dc10ce9..fbca56bd9cbc 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -20,7 +20,7 @@ struct pt_regs;
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
/*
- * Whitelist ganerating macro. Specify functions which can be
+ * Whitelist generating macro. Specify functions which can be
* error-injectable using this macro.
*/
#define ALLOW_ERROR_INJECTION(fname, _etype) \
@@ -29,7 +29,7 @@ static struct error_injection_entry __used \
_eil_addr_##fname = { \
.addr = (unsigned long)fname, \
.etype = EI_ETYPE_##_etype, \
- };
+ }
void override_function_with_return(struct pt_regs *regs);
#else
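
With the semicolon moved out of the expansion, call sites now terminate the macro themselves, matching usual kernel style and avoiding a stray empty statement. A usage sketch; the function name is hypothetical, while ALLOW_ERROR_INJECTION() and the ERRNO etype are real:

    static int foo_may_fail(void)	/* hypothetical error-injectable function */
    {
    	return 0;
    }
    ALLOW_ERROR_INJECTION(foo_may_fail, ERRNO);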
diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h
index a53116b8c57e..8a59b6e567df 100644
--- a/include/asm-generic/logic_io.h
+++ b/include/asm-generic/logic_io.h
@@ -34,7 +34,7 @@
void __iomem *ioremap(phys_addr_t offset, size_t size);
#define iounmap iounmap
-void iounmap(void __iomem *addr);
+void iounmap(void volatile __iomem *addr);
#define __raw_readb __raw_readb
u8 __raw_readb(const volatile void __iomem *addr);
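
The added volatile qualifier lets callers that hold their mapping in a volatile-qualified pointer pass it to iounmap() without a cast or a qualifier-discard warning. A hedged sketch; phys_base and REGS_SIZE are hypothetical:

    volatile u32 __iomem *regs = ioremap(phys_base, REGS_SIZE);

    if (regs) {
    	writel(0x1, regs);	/* writel() already takes a volatile pointer */
    	iounmap(regs);		/* previously warned: discards 'volatile' */
    }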
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 1dfadb2e878d..690f741764e1 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -130,18 +130,24 @@ static inline bool init_section_intersects(void *virt, size_t size)
/**
* is_kernel_core_data - checks if the pointer address is located in the
- * .data section
+ * .data or .bss section
*
* @addr: address to check
*
- * Returns: true if the address is located in .data, false otherwise.
+ * Returns: true if the address is located in .data or .bss, false otherwise.
 * Note: on some architectures this may return true for core RODATA and
 * false on others, but it will always return true for core RW data.
*/
static inline bool is_kernel_core_data(unsigned long addr)
{
- return addr >= (unsigned long)_sdata &&
- addr < (unsigned long)_edata;
+ if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
+ return true;
+
+ if (addr >= (unsigned long)__bss_start &&
+ addr < (unsigned long)__bss_stop)
+ return true;
+
+ return false;
}
/**
@@ -193,12 +199,16 @@ static inline bool __is_kernel_text(unsigned long addr)
* @addr: address to check
*
* Returns: true if the address is located in the kernel range, false otherwise.
- * Note: an internal helper, only check the range of _stext to _end.
+ * Note: an internal helper; checks the range from _stext to _end, plus
+ * the range from __init_begin to __init_end, which can lie outside the
+ * _stext to _end range.
*/
static inline bool __is_kernel(unsigned long addr)
{
- return addr >= (unsigned long)_stext &&
- addr < (unsigned long)_end;
+ return ((addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_end) ||
+ (addr >= (unsigned long)__init_begin &&
+ addr < (unsigned long)__init_end));
}
#endif /* _ASM_GENERIC_SECTIONS_H_ */