Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/5level-fixup.h | 58
-rw-r--r--  include/asm-generic/Kbuild | 58
-rw-r--r--  include/asm-generic/access_ok.h | 48
-rw-r--r--  include/asm-generic/archrandom.h | 15
-rw-r--r--  include/asm-generic/atomic-instrumented.h | 1788
-rw-r--r--  include/asm-generic/atomic-long.h | 1013
-rw-r--r--  include/asm-generic/atomic.h | 120
-rw-r--r--  include/asm-generic/atomic64.h | 45
-rw-r--r--  include/asm-generic/barrier.h | 117
-rw-r--r--  include/asm-generic/bitops.h | 6
-rw-r--r--  include/asm-generic/bitops/atomic.h | 38
-rw-r--r--  include/asm-generic/bitops/builtin-ffs.h | 7
-rw-r--r--  include/asm-generic/bitops/ffs.h | 2
-rw-r--r--  include/asm-generic/bitops/find.h | 100
-rw-r--r--  include/asm-generic/bitops/generic-non-atomic.h | 175
-rw-r--r--  include/asm-generic/bitops/instrumented-atomic.h | 29
-rw-r--r--  include/asm-generic/bitops/instrumented-lock.h | 13
-rw-r--r--  include/asm-generic/bitops/instrumented-non-atomic.h | 87
-rw-r--r--  include/asm-generic/bitops/le.h | 34
-rw-r--r--  include/asm-generic/bitops/lock.h | 39
-rw-r--r--  include/asm-generic/bitops/non-atomic.h | 109
-rw-r--r--  include/asm-generic/bitops/non-instrumented-non-atomic.h | 17
-rw-r--r--  include/asm-generic/bitsperlong.h | 12
-rw-r--r--  include/asm-generic/bug.h | 61
-rw-r--r--  include/asm-generic/cacheflush.h | 40
-rw-r--r--  include/asm-generic/checksum.h | 23
-rw-r--r--  include/asm-generic/cmpxchg-local.h | 4
-rw-r--r--  include/asm-generic/cmpxchg.h | 42
-rw-r--r--  include/asm-generic/compat.h | 143
-rw-r--r--  include/asm-generic/div64.h | 24
-rw-r--r--  include/asm-generic/dma-contiguous.h | 10
-rw-r--r--  include/asm-generic/early_ioremap.h | 6
-rw-r--r--  include/asm-generic/error-injection.h | 6
-rw-r--r--  include/asm-generic/export.h | 24
-rw-r--r--  include/asm-generic/futex.h | 33
-rw-r--r--  include/asm-generic/getorder.h | 2
-rw-r--r--  include/asm-generic/gpio.h | 4
-rw-r--r--  include/asm-generic/hardirq.h | 6
-rw-r--r--  include/asm-generic/hugetlb.h | 30
-rw-r--r--  include/asm-generic/hyperv-tlfs.h | 793
-rw-r--r--  include/asm-generic/io.h | 267
-rw-r--r--  include/asm-generic/iomap.h | 43
-rw-r--r--  include/asm-generic/kmap_size.h | 12
-rw-r--r--  include/asm-generic/kmap_types.h | 11
-rw-r--r--  include/asm-generic/kprobes.h | 4
-rw-r--r--  include/asm-generic/kvm_types.h | 5
-rw-r--r--  include/asm-generic/logic_io.h | 78
-rw-r--r--  include/asm-generic/memory_model.h | 37
-rw-r--r--  include/asm-generic/mm-arch-hooks.h | 16
-rw-r--r--  include/asm-generic/mmiowb.h | 6
-rw-r--r--  include/asm-generic/mmu_context.h | 58
-rw-r--r--  include/asm-generic/module.lds.h | 10
-rw-r--r--  include/asm-generic/mshyperv.h | 144
-rw-r--r--  include/asm-generic/msi.h | 8
-rw-r--r--  include/asm-generic/nommu_context.h | 19
-rw-r--r--  include/asm-generic/numa.h | 52
-rw-r--r--  include/asm-generic/page.h | 4
-rw-r--r--  include/asm-generic/pci.h | 39
-rw-r--r--  include/asm-generic/pci_iomap.h | 7
-rw-r--r--  include/asm-generic/percpu.h | 18
-rw-r--r--  include/asm-generic/pgalloc.h | 92
-rw-r--r--  include/asm-generic/pgtable-nop4d-hack.h | 64
-rw-r--r--  include/asm-generic/pgtable-nop4d.h | 3
-rw-r--r--  include/asm-generic/pgtable-nopmd.h | 5
-rw-r--r--  include/asm-generic/pgtable-nopud.h | 7
-rw-r--r--  include/asm-generic/pgtable.h | 1261
-rw-r--r--  include/asm-generic/pgtable_uffd.h | 66
-rw-r--r--  include/asm-generic/preempt.h | 2
-rw-r--r--  include/asm-generic/qrwlock.h | 63
-rw-r--r--  include/asm-generic/qrwlock_types.h | 2
-rw-r--r--  include/asm-generic/qspinlock.h | 38
-rw-r--r--  include/asm-generic/qspinlock_types.h | 8
-rw-r--r--  include/asm-generic/rwonce.h | 90
-rw-r--r--  include/asm-generic/seccomp.h | 2
-rw-r--r--  include/asm-generic/sections.h | 122
-rw-r--r--  include/asm-generic/signal.h | 2
-rw-r--r--  include/asm-generic/softirq_stack.h | 14
-rw-r--r--  include/asm-generic/spinlock.h | 94
-rw-r--r--  include/asm-generic/spinlock_types.h | 17
-rw-r--r--  include/asm-generic/syscall.h | 36
-rw-r--r--  include/asm-generic/termios-base.h | 78
-rw-r--r--  include/asm-generic/termios.h | 108
-rw-r--r--  include/asm-generic/tlb.h | 159
-rw-r--r--  include/asm-generic/topology.h | 4
-rw-r--r--  include/asm-generic/uaccess.h | 192
-rw-r--r--  include/asm-generic/unaligned.h | 167
-rw-r--r--  include/asm-generic/vdso/vsyscall.h | 14
-rw-r--r--  include/asm-generic/vermagic.h | 7
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 304
-rw-r--r--  include/asm-generic/xor.h | 84
90 files changed, 3475 insertions, 5649 deletions
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
deleted file mode 100644
index 4c74b1c1d13b..000000000000
--- a/include/asm-generic/5level-fixup.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _5LEVEL_FIXUP_H
-#define _5LEVEL_FIXUP_H
-
-#define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED 1
-
-#define P4D_SHIFT PGDIR_SHIFT
-#define P4D_SIZE PGDIR_SIZE
-#define P4D_MASK PGDIR_MASK
-#define MAX_PTRS_PER_P4D 1
-#define PTRS_PER_P4D 1
-
-#define p4d_t pgd_t
-
-#define pud_alloc(mm, p4d, address) \
- ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \
- NULL : pud_offset(p4d, address))
-
-#define p4d_alloc(mm, pgd, address) (pgd)
-#define p4d_offset(pgd, start) (pgd)
-
-#ifndef __ASSEMBLY__
-static inline int p4d_none(p4d_t p4d)
-{
- return 0;
-}
-
-static inline int p4d_bad(p4d_t p4d)
-{
- return 0;
-}
-
-static inline int p4d_present(p4d_t p4d)
-{
- return 1;
-}
-#endif
-
-#define p4d_ERROR(p4d) do { } while (0)
-#define p4d_clear(p4d) pgd_clear(p4d)
-#define p4d_val(p4d) pgd_val(p4d)
-#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
-#define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud)
-#define p4d_page(p4d) pgd_page(p4d)
-#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d)
-
-#define __p4d(x) __pgd(x)
-#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d)
-
-#undef p4d_free_tlb
-#define p4d_free_tlb(tlb, x, addr) do { } while (0)
-#define p4d_free(mm, x) do { } while (0)
-
-#undef p4d_addr_end
-#define p4d_addr_end(addr, end) (end)
-
-#endif
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index cd17d50697cc..941be574bbe0 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -4,6 +4,62 @@
# (This file is not included when SRCARCH=um since UML borrows several
# asm headers from the host architecture.)
-mandatory-y += dma-contiguous.h
+mandatory-y += atomic.h
+mandatory-y += archrandom.h
+mandatory-y += barrier.h
+mandatory-y += bitops.h
+mandatory-y += bug.h
+mandatory-y += bugs.h
+mandatory-y += cacheflush.h
+mandatory-y += checksum.h
+mandatory-y += compat.h
+mandatory-y += current.h
+mandatory-y += delay.h
+mandatory-y += device.h
+mandatory-y += div64.h
+mandatory-y += dma-mapping.h
+mandatory-y += dma.h
+mandatory-y += emergency-restart.h
+mandatory-y += exec.h
+mandatory-y += fb.h
+mandatory-y += ftrace.h
+mandatory-y += futex.h
+mandatory-y += hardirq.h
+mandatory-y += hw_irq.h
+mandatory-y += io.h
+mandatory-y += irq.h
+mandatory-y += irq_regs.h
+mandatory-y += irq_work.h
+mandatory-y += kdebug.h
+mandatory-y += kmap_size.h
+mandatory-y += kprobes.h
+mandatory-y += linkage.h
+mandatory-y += local.h
+mandatory-y += local64.h
+mandatory-y += mmiowb.h
+mandatory-y += mmu.h
+mandatory-y += mmu_context.h
+mandatory-y += module.h
+mandatory-y += module.lds.h
mandatory-y += msi.h
+mandatory-y += pci.h
+mandatory-y += percpu.h
+mandatory-y += pgalloc.h
+mandatory-y += preempt.h
+mandatory-y += rwonce.h
+mandatory-y += sections.h
+mandatory-y += serial.h
+mandatory-y += shmparam.h
mandatory-y += simd.h
+mandatory-y += softirq_stack.h
+mandatory-y += switch_to.h
+mandatory-y += timex.h
+mandatory-y += tlbflush.h
+mandatory-y += topology.h
+mandatory-y += trace_clock.h
+mandatory-y += uaccess.h
+mandatory-y += unaligned.h
+mandatory-y += vermagic.h
+mandatory-y += vga.h
+mandatory-y += word-at-a-time.h
+mandatory-y += xor.h
diff --git a/include/asm-generic/access_ok.h b/include/asm-generic/access_ok.h
new file mode 100644
index 000000000000..2866ae61b1cd
--- /dev/null
+++ b/include/asm-generic/access_ok.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ACCESS_OK_H__
+#define __ASM_GENERIC_ACCESS_OK_H__
+
+/*
+ * Checking whether a pointer is valid for user space access.
+ * These definitions work on most architectures, but overrides can
+ * be used where necessary.
+ */
+
+/*
+ * architectures with compat tasks have a variable TASK_SIZE and should
+ * override this to a constant.
+ */
+#ifndef TASK_SIZE_MAX
+#define TASK_SIZE_MAX TASK_SIZE
+#endif
+
+#ifndef __access_ok
+/*
+ * 'size' is a compile-time constant for most callers, so optimize for
+ * this case to turn the check into a single comparison against a constant
+ * limit and catch all possible overflows.
+ * On architectures with separate user address space (m68k, s390, parisc,
+ * sparc64) or those without an MMU, this should always return true.
+ *
+ * This version was originally contributed by Jonas Bonn for the
+ * OpenRISC architecture, and was found to be the most efficient
+ * for constant 'size' and 'limit' values.
+ */
+static inline int __access_ok(const void __user *ptr, unsigned long size)
+{
+ unsigned long limit = TASK_SIZE_MAX;
+ unsigned long addr = (unsigned long)ptr;
+
+ if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
+ !IS_ENABLED(CONFIG_MMU))
+ return true;
+
+ return (size <= limit) && (addr <= (limit - size));
+}
+#endif
+
+#ifndef access_ok
+#define access_ok(addr, size) likely(__access_ok(addr, size))
+#endif
+
+#endif
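
For context on the new access_ok.h above: a minimal usage sketch, not part of the patch, showing how a caller typically gates a user pointer with access_ok() before copying. The function and buffer names below are hypothetical.

#include <linux/uaccess.h>	/* access_ok(), copy_from_user() */

static long example_read_from_user(const char __user *ubuf, size_t len)
{
	char kbuf[64];

	if (len > sizeof(kbuf))
		return -EINVAL;

	/* access_ok() only checks the range/overflow; it does not fault pages in. */
	if (!access_ok(ubuf, len))
		return -EFAULT;

	/* copy_from_user() returns the number of bytes it could not copy. */
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;

	return len;
}

On architectures covered by the generic __access_ok(), the (size <= limit) && (addr <= (limit - size)) form keeps the check overflow-safe even when addr + size would wrap.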
diff --git a/include/asm-generic/archrandom.h b/include/asm-generic/archrandom.h
new file mode 100644
index 000000000000..3cd7f980cfdc
--- /dev/null
+++ b/include/asm-generic/archrandom.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ARCHRANDOM_H__
+#define __ASM_GENERIC_ARCHRANDOM_H__
+
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+#endif
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
deleted file mode 100644
index e8730c6b9fe2..000000000000
--- a/include/asm-generic/atomic-instrumented.h
+++ /dev/null
@@ -1,1788 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-instrumented.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-/*
- * This file provides wrappers with KASAN instrumentation for atomic operations.
- * To use this functionality an arch's atomic.h file needs to define all
- * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
- * this file at the end. This file provides atomic_read() that forwards to
- * arch_atomic_read() for actual atomic operation.
- * Note: if an arch atomic operation is implemented by means of other atomic
- * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
- * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
- * double instrumentation.
- */
-#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-
-#include <linux/build_bug.h>
-#include <linux/kasan-checks.h>
-
-static inline int
-atomic_read(const atomic_t *v)
-{
- kasan_check_read(v, sizeof(*v));
- return arch_atomic_read(v);
-}
-#define atomic_read atomic_read
-
-#if defined(arch_atomic_read_acquire)
-static inline int
-atomic_read_acquire(const atomic_t *v)
-{
- kasan_check_read(v, sizeof(*v));
- return arch_atomic_read_acquire(v);
-}
-#define atomic_read_acquire atomic_read_acquire
-#endif
-
-static inline void
-atomic_set(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_set(v, i);
-}
-#define atomic_set atomic_set
-
-#if defined(arch_atomic_set_release)
-static inline void
-atomic_set_release(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_set_release(v, i);
-}
-#define atomic_set_release atomic_set_release
-#endif
-
-static inline void
-atomic_add(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_add(i, v);
-}
-#define atomic_add atomic_add
-
-#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
-static inline int
-atomic_add_return(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_return(i, v);
-}
-#define atomic_add_return atomic_add_return
-#endif
-
-#if defined(arch_atomic_add_return_acquire)
-static inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_return_acquire(i, v);
-}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-
-#if defined(arch_atomic_add_return_release)
-static inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_return_release(i, v);
-}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-
-#if defined(arch_atomic_add_return_relaxed)
-static inline int
-atomic_add_return_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_return_relaxed(i, v);
-}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#endif
-
-#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
-static inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add(i, v);
-}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-
-#if defined(arch_atomic_fetch_add_acquire)
-static inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add_acquire(i, v);
-}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-
-#if defined(arch_atomic_fetch_add_release)
-static inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add_release(i, v);
-}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-
-#if defined(arch_atomic_fetch_add_relaxed)
-static inline int
-atomic_fetch_add_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add_relaxed(i, v);
-}
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#endif
-
-static inline void
-atomic_sub(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_sub(i, v);
-}
-#define atomic_sub atomic_sub
-
-#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
-static inline int
-atomic_sub_return(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_return(i, v);
-}
-#define atomic_sub_return atomic_sub_return
-#endif
-
-#if defined(arch_atomic_sub_return_acquire)
-static inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_return_acquire(i, v);
-}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-
-#if defined(arch_atomic_sub_return_release)
-static inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_return_release(i, v);
-}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-
-#if defined(arch_atomic_sub_return_relaxed)
-static inline int
-atomic_sub_return_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_return_relaxed(i, v);
-}
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#endif
-
-#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
-static inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_sub(i, v);
-}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-
-#if defined(arch_atomic_fetch_sub_acquire)
-static inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_acquire(i, v);
-}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-
-#if defined(arch_atomic_fetch_sub_release)
-static inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_release(i, v);
-}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-
-#if defined(arch_atomic_fetch_sub_relaxed)
-static inline int
-atomic_fetch_sub_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_relaxed(i, v);
-}
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#endif
-
-#if defined(arch_atomic_inc)
-static inline void
-atomic_inc(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_inc(v);
-}
-#define atomic_inc atomic_inc
-#endif
-
-#if defined(arch_atomic_inc_return)
-static inline int
-atomic_inc_return(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_return(v);
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#if defined(arch_atomic_inc_return_acquire)
-static inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_return_acquire(v);
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#if defined(arch_atomic_inc_return_release)
-static inline int
-atomic_inc_return_release(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_return_release(v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#if defined(arch_atomic_inc_return_relaxed)
-static inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_return_relaxed(v);
-}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-
-#if defined(arch_atomic_fetch_inc)
-static inline int
-atomic_fetch_inc(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_inc(v);
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#if defined(arch_atomic_fetch_inc_acquire)
-static inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_acquire(v);
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#if defined(arch_atomic_fetch_inc_release)
-static inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_release(v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#if defined(arch_atomic_fetch_inc_relaxed)
-static inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_relaxed(v);
-}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-
-#if defined(arch_atomic_dec)
-static inline void
-atomic_dec(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_dec(v);
-}
-#define atomic_dec atomic_dec
-#endif
-
-#if defined(arch_atomic_dec_return)
-static inline int
-atomic_dec_return(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_return(v);
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#if defined(arch_atomic_dec_return_acquire)
-static inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_return_acquire(v);
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#if defined(arch_atomic_dec_return_release)
-static inline int
-atomic_dec_return_release(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_return_release(v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#if defined(arch_atomic_dec_return_relaxed)
-static inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_return_relaxed(v);
-}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-
-#if defined(arch_atomic_fetch_dec)
-static inline int
-atomic_fetch_dec(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_dec(v);
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#if defined(arch_atomic_fetch_dec_acquire)
-static inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_acquire(v);
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#if defined(arch_atomic_fetch_dec_release)
-static inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_release(v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#if defined(arch_atomic_fetch_dec_relaxed)
-static inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_relaxed(v);
-}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
-
-static inline void
-atomic_and(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_and(i, v);
-}
-#define atomic_and atomic_and
-
-#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
-static inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_and(i, v);
-}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-
-#if defined(arch_atomic_fetch_and_acquire)
-static inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_and_acquire(i, v);
-}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-
-#if defined(arch_atomic_fetch_and_release)
-static inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_and_release(i, v);
-}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-
-#if defined(arch_atomic_fetch_and_relaxed)
-static inline int
-atomic_fetch_and_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_and_relaxed(i, v);
-}
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#endif
-
-#if defined(arch_atomic_andnot)
-static inline void
-atomic_andnot(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_andnot(i, v);
-}
-#define atomic_andnot atomic_andnot
-#endif
-
-#if defined(arch_atomic_fetch_andnot)
-static inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot(i, v);
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#if defined(arch_atomic_fetch_andnot_acquire)
-static inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_acquire(i, v);
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#if defined(arch_atomic_fetch_andnot_release)
-static inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_release(i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#if defined(arch_atomic_fetch_andnot_relaxed)
-static inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
-
-static inline void
-atomic_or(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_or(i, v);
-}
-#define atomic_or atomic_or
-
-#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
-static inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_or(i, v);
-}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-
-#if defined(arch_atomic_fetch_or_acquire)
-static inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_or_acquire(i, v);
-}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-
-#if defined(arch_atomic_fetch_or_release)
-static inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_or_release(i, v);
-}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-
-#if defined(arch_atomic_fetch_or_relaxed)
-static inline int
-atomic_fetch_or_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_or_relaxed(i, v);
-}
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#endif
-
-static inline void
-atomic_xor(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic_xor(i, v);
-}
-#define atomic_xor atomic_xor
-
-#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
-static inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_xor(i, v);
-}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-
-#if defined(arch_atomic_fetch_xor_acquire)
-static inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_acquire(i, v);
-}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-
-#if defined(arch_atomic_fetch_xor_release)
-static inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_release(i, v);
-}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-
-#if defined(arch_atomic_fetch_xor_relaxed)
-static inline int
-atomic_fetch_xor_relaxed(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_relaxed(i, v);
-}
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#endif
-
-#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
-static inline int
-atomic_xchg(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_xchg(v, i);
-}
-#define atomic_xchg atomic_xchg
-#endif
-
-#if defined(arch_atomic_xchg_acquire)
-static inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_xchg_acquire(v, i);
-}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-
-#if defined(arch_atomic_xchg_release)
-static inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_xchg_release(v, i);
-}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-
-#if defined(arch_atomic_xchg_relaxed)
-static inline int
-atomic_xchg_relaxed(atomic_t *v, int i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_xchg_relaxed(v, i);
-}
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#endif
-
-#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
-static inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_cmpxchg(v, old, new);
-}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-
-#if defined(arch_atomic_cmpxchg_acquire)
-static inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_acquire(v, old, new);
-}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic_cmpxchg_release)
-static inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_release(v, old, new);
-}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-
-#if defined(arch_atomic_cmpxchg_relaxed)
-static inline int
-atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic_try_cmpxchg)
-static inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg(v, old, new);
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_acquire)
-static inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_acquire(v, old, new);
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_release)
-static inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_release(v, old, new);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_relaxed)
-static inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic_sub_and_test)
-static inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_and_test(i, v);
-}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-
-#if defined(arch_atomic_dec_and_test)
-static inline bool
-atomic_dec_and_test(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_and_test(v);
-}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-
-#if defined(arch_atomic_inc_and_test)
-static inline bool
-atomic_inc_and_test(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_and_test(v);
-}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-
-#if defined(arch_atomic_add_negative)
-static inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_negative(i, v);
-}
-#define atomic_add_negative atomic_add_negative
-#endif
-
-#if defined(arch_atomic_fetch_add_unless)
-static inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add_unless(v, a, u);
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-
-#if defined(arch_atomic_add_unless)
-static inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_unless(v, a, u);
-}
-#define atomic_add_unless atomic_add_unless
-#endif
-
-#if defined(arch_atomic_inc_not_zero)
-static inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_not_zero(v);
-}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
-
-#if defined(arch_atomic_inc_unless_negative)
-static inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_unless_negative(v);
-}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-
-#if defined(arch_atomic_dec_unless_positive)
-static inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_unless_positive(v);
-}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-
-#if defined(arch_atomic_dec_if_positive)
-static inline int
-atomic_dec_if_positive(atomic_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_if_positive(v);
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
-
-static inline s64
-atomic64_read(const atomic64_t *v)
-{
- kasan_check_read(v, sizeof(*v));
- return arch_atomic64_read(v);
-}
-#define atomic64_read atomic64_read
-
-#if defined(arch_atomic64_read_acquire)
-static inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
- kasan_check_read(v, sizeof(*v));
- return arch_atomic64_read_acquire(v);
-}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
-
-static inline void
-atomic64_set(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_set(v, i);
-}
-#define atomic64_set atomic64_set
-
-#if defined(arch_atomic64_set_release)
-static inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_set_release(v, i);
-}
-#define atomic64_set_release atomic64_set_release
-#endif
-
-static inline void
-atomic64_add(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_add(i, v);
-}
-#define atomic64_add atomic64_add
-
-#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
-static inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_return(i, v);
-}
-#define atomic64_add_return atomic64_add_return
-#endif
-
-#if defined(arch_atomic64_add_return_acquire)
-static inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_return_acquire(i, v);
-}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-
-#if defined(arch_atomic64_add_return_release)
-static inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_return_release(i, v);
-}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-
-#if defined(arch_atomic64_add_return_relaxed)
-static inline s64
-atomic64_add_return_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_return_relaxed(i, v);
-}
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#endif
-
-#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
-static inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add(i, v);
-}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-
-#if defined(arch_atomic64_fetch_add_acquire)
-static inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_acquire(i, v);
-}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_add_release)
-static inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_release(i, v);
-}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-
-#if defined(arch_atomic64_fetch_add_relaxed)
-static inline s64
-atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_relaxed(i, v);
-}
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#endif
-
-static inline void
-atomic64_sub(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_sub(i, v);
-}
-#define atomic64_sub atomic64_sub
-
-#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
-static inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_return(i, v);
-}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-
-#if defined(arch_atomic64_sub_return_acquire)
-static inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_return_acquire(i, v);
-}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-
-#if defined(arch_atomic64_sub_return_release)
-static inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_return_release(i, v);
-}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-
-#if defined(arch_atomic64_sub_return_relaxed)
-static inline s64
-atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_return_relaxed(i, v);
-}
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#endif
-
-#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
-static inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub(i, v);
-}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-
-#if defined(arch_atomic64_fetch_sub_acquire)
-static inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_acquire(i, v);
-}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_sub_release)
-static inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_release(i, v);
-}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-
-#if defined(arch_atomic64_fetch_sub_relaxed)
-static inline s64
-atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#endif
-
-#if defined(arch_atomic64_inc)
-static inline void
-atomic64_inc(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_inc(v);
-}
-#define atomic64_inc atomic64_inc
-#endif
-
-#if defined(arch_atomic64_inc_return)
-static inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_return(v);
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#if defined(arch_atomic64_inc_return_acquire)
-static inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_return_acquire(v);
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#if defined(arch_atomic64_inc_return_release)
-static inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_return_release(v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#if defined(arch_atomic64_inc_return_relaxed)
-static inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_return_relaxed(v);
-}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-
-#if defined(arch_atomic64_fetch_inc)
-static inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc(v);
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#if defined(arch_atomic64_fetch_inc_acquire)
-static inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_acquire(v);
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_inc_release)
-static inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_release(v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#if defined(arch_atomic64_fetch_inc_relaxed)
-static inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_relaxed(v);
-}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-
-#if defined(arch_atomic64_dec)
-static inline void
-atomic64_dec(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_dec(v);
-}
-#define atomic64_dec atomic64_dec
-#endif
-
-#if defined(arch_atomic64_dec_return)
-static inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_return(v);
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#if defined(arch_atomic64_dec_return_acquire)
-static inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_return_acquire(v);
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#if defined(arch_atomic64_dec_return_release)
-static inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_return_release(v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#if defined(arch_atomic64_dec_return_relaxed)
-static inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_return_relaxed(v);
-}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-
-#if defined(arch_atomic64_fetch_dec)
-static inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec(v);
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#if defined(arch_atomic64_fetch_dec_acquire)
-static inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_acquire(v);
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_dec_release)
-static inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_release(v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#if defined(arch_atomic64_fetch_dec_relaxed)
-static inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_relaxed(v);
-}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
-
-static inline void
-atomic64_and(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_and(i, v);
-}
-#define atomic64_and atomic64_and
-
-#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
-static inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_and(i, v);
-}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-
-#if defined(arch_atomic64_fetch_and_acquire)
-static inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_acquire(i, v);
-}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_and_release)
-static inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_release(i, v);
-}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-
-#if defined(arch_atomic64_fetch_and_relaxed)
-static inline s64
-atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_relaxed(i, v);
-}
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#endif
-
-#if defined(arch_atomic64_andnot)
-static inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_andnot(i, v);
-}
-#define atomic64_andnot atomic64_andnot
-#endif
-
-#if defined(arch_atomic64_fetch_andnot)
-static inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot(i, v);
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_acquire)
-static inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_acquire(i, v);
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_release)
-static inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_release(i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_relaxed)
-static inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
-
-static inline void
-atomic64_or(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_or(i, v);
-}
-#define atomic64_or atomic64_or
-
-#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
-static inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_or(i, v);
-}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-
-#if defined(arch_atomic64_fetch_or_acquire)
-static inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_acquire(i, v);
-}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_or_release)
-static inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_release(i, v);
-}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-
-#if defined(arch_atomic64_fetch_or_relaxed)
-static inline s64
-atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_relaxed(i, v);
-}
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#endif
-
-static inline void
-atomic64_xor(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- arch_atomic64_xor(i, v);
-}
-#define atomic64_xor atomic64_xor
-
-#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
-static inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor(i, v);
-}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-
-#if defined(arch_atomic64_fetch_xor_acquire)
-static inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_acquire(i, v);
-}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_xor_release)
-static inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_release(i, v);
-}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-
-#if defined(arch_atomic64_fetch_xor_relaxed)
-static inline s64
-atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#endif
-
-#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
-static inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_xchg(v, i);
-}
-#define atomic64_xchg atomic64_xchg
-#endif
-
-#if defined(arch_atomic64_xchg_acquire)
-static inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_xchg_acquire(v, i);
-}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-
-#if defined(arch_atomic64_xchg_release)
-static inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_xchg_release(v, i);
-}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-
-#if defined(arch_atomic64_xchg_relaxed)
-static inline s64
-atomic64_xchg_relaxed(atomic64_t *v, s64 i)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_xchg_relaxed(v, i);
-}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
-#endif
-
-#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
-static inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg(v, old, new);
-}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-
-#if defined(arch_atomic64_cmpxchg_acquire)
-static inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_acquire(v, old, new);
-}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic64_cmpxchg_release)
-static inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_release(v, old, new);
-}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-
-#if defined(arch_atomic64_cmpxchg_relaxed)
-static inline s64
-atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg)
-static inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg(v, old, new);
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_acquire)
-static inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_acquire(v, old, new);
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_release)
-static inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_release(v, old, new);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_relaxed)
-static inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic64_sub_and_test)
-static inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_and_test(i, v);
-}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-
-#if defined(arch_atomic64_dec_and_test)
-static inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_and_test(v);
-}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-
-#if defined(arch_atomic64_inc_and_test)
-static inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_and_test(v);
-}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
-
-#if defined(arch_atomic64_add_negative)
-static inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_negative(i, v);
-}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-
-#if defined(arch_atomic64_fetch_add_unless)
-static inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_unless(v, a, u);
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-
-#if defined(arch_atomic64_add_unless)
-static inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_unless(v, a, u);
-}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-
-#if defined(arch_atomic64_inc_not_zero)
-static inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_not_zero(v);
-}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
-
-#if defined(arch_atomic64_inc_unless_negative)
-static inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_unless_negative(v);
-}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-
-#if defined(arch_atomic64_dec_unless_positive)
-static inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_unless_positive(v);
-}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-
-#if defined(arch_atomic64_dec_if_positive)
-static inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
- kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_if_positive(v);
-}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
-
-#if !defined(arch_xchg_relaxed) || defined(arch_xchg)
-#define xchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_acquire)
-#define xchg_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_release)
-#define xchg_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_relaxed)
-#define xchg_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg)
-#define cmpxchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_acquire)
-#define cmpxchg_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_release)
-#define cmpxchg_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_relaxed)
-#define cmpxchg_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64)
-#define cmpxchg64(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_acquire)
-#define cmpxchg64_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_release)
-#define cmpxchg64_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_relaxed)
-#define cmpxchg64_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#define cmpxchg_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define sync_cmpxchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_double(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
- arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
-})
-
-
-#define cmpxchg_double_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
- arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// b29b625d5de9280f680e42c7be859b55b15e5f6a
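
The header comment of the removed atomic-instrumented.h (top of the deleted file above) describes its usage contract: an architecture defines its operations with the arch_ prefix and includes the wrapper header last, which then supplies the plain atomic_*() names with KASAN checks. A minimal, hypothetical sketch of that contract, not taken from any real architecture:

/* arch/example/include/asm/atomic.h -- hypothetical architecture */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

/* ... all remaining arch_atomic_*() operations ... */

/* Included last: wraps each arch_atomic_*() as atomic_*() plus KASAN checks. */
#include <asm-generic/atomic-instrumented.h>

As the comment notes, arch code that builds one atomic operation from another must keep using the arch_ variants internally so the instrumentation is not applied twice.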
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
deleted file mode 100644
index 881c7e27af28..000000000000
--- a/include/asm-generic/atomic-long.h
+++ /dev/null
@@ -1,1013 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-long.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _ASM_GENERIC_ATOMIC_LONG_H
-#define _ASM_GENERIC_ATOMIC_LONG_H
-
-#include <asm/types.h>
-
-#ifdef CONFIG_64BIT
-typedef atomic64_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
-#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
-#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
-#else
-typedef atomic_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
-#define atomic_long_cond_read_acquire atomic_cond_read_acquire
-#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
-#endif
-
-#ifdef CONFIG_64BIT
-
-static inline long
-atomic_long_read(const atomic_long_t *v)
-{
- return atomic64_read(v);
-}
-
-static inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
- return atomic64_read_acquire(v);
-}
-
-static inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
- atomic64_set(v, i);
-}
-
-static inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
- atomic64_set_release(v, i);
-}
-
-static inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
- atomic64_add(i, v);
-}
-
-static inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
- return atomic64_add_return(i, v);
-}
-
-static inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
- return atomic64_add_return_acquire(i, v);
-}
-
-static inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
- return atomic64_add_return_release(i, v);
-}
-
-static inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_add_return_relaxed(i, v);
-}
-
-static inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_relaxed(i, v);
-}
-
-static inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
- atomic64_sub(i, v);
-}
-
-static inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return atomic64_sub_return(i, v);
-}
-
-static inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_acquire(i, v);
-}
-
-static inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_release(i, v);
-}
-
-static inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_relaxed(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_relaxed(i, v);
-}
-
-static inline void
-atomic_long_inc(atomic_long_t *v)
-{
- atomic64_inc(v);
-}
-
-static inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
- return atomic64_inc_return(v);
-}
-
-static inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
- return atomic64_inc_return_acquire(v);
-}
-
-static inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
- return atomic64_inc_return_release(v);
-}
-
-static inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
- return atomic64_inc_return_relaxed(v);
-}
-
-static inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
- return atomic64_fetch_inc(v);
-}
-
-static inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
- return atomic64_fetch_inc_acquire(v);
-}
-
-static inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
- return atomic64_fetch_inc_release(v);
-}
-
-static inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
- return atomic64_fetch_inc_relaxed(v);
-}
-
-static inline void
-atomic_long_dec(atomic_long_t *v)
-{
- atomic64_dec(v);
-}
-
-static inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
- return atomic64_dec_return(v);
-}
-
-static inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
- return atomic64_dec_return_acquire(v);
-}
-
-static inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
- return atomic64_dec_return_release(v);
-}
-
-static inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
- return atomic64_dec_return_relaxed(v);
-}
-
-static inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
- return atomic64_fetch_dec(v);
-}
-
-static inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
- return atomic64_fetch_dec_acquire(v);
-}
-
-static inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
- return atomic64_fetch_dec_release(v);
-}
-
-static inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
- return atomic64_fetch_dec_relaxed(v);
-}
-
-static inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
- atomic64_and(i, v);
-}
-
-static inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_relaxed(i, v);
-}
-
-static inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
- atomic64_andnot(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_relaxed(i, v);
-}
-
-static inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
- atomic64_or(i, v);
-}
-
-static inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_relaxed(i, v);
-}
-
-static inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
- atomic64_xor(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_relaxed(i, v);
-}
-
-static inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
- return atomic64_xchg(v, i);
-}
-
-static inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
- return atomic64_xchg_acquire(v, i);
-}
-
-static inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
- return atomic64_xchg_release(v, i);
-}
-
-static inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
- return atomic64_xchg_relaxed(v, i);
-}
-
-static inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_acquire(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_release(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_relaxed(v, old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg(v, (s64 *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
-}
-
-static inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
- return atomic64_sub_and_test(i, v);
-}
-
-static inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
- return atomic64_dec_and_test(v);
-}
-
-static inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
- return atomic64_inc_and_test(v);
-}
-
-static inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
- return atomic64_add_negative(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic64_fetch_add_unless(v, a, u);
-}
-
-static inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic64_add_unless(v, a, u);
-}
-
-static inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
- return atomic64_inc_not_zero(v);
-}
-
-static inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
- return atomic64_inc_unless_negative(v);
-}
-
-static inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
- return atomic64_dec_unless_positive(v);
-}
-
-static inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
- return atomic64_dec_if_positive(v);
-}
-
-#else /* CONFIG_64BIT */
-
-static inline long
-atomic_long_read(const atomic_long_t *v)
-{
- return atomic_read(v);
-}
-
-static inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
- return atomic_read_acquire(v);
-}
-
-static inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
- atomic_set(v, i);
-}
-
-static inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
- atomic_set_release(v, i);
-}
-
-static inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
- atomic_add(i, v);
-}
-
-static inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
- return atomic_add_return(i, v);
-}
-
-static inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
- return atomic_add_return_acquire(i, v);
-}
-
-static inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
- return atomic_add_return_release(i, v);
-}
-
-static inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic_add_return_relaxed(i, v);
-}
-
-static inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
- return atomic_fetch_add(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_relaxed(i, v);
-}
-
-static inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
- atomic_sub(i, v);
-}
-
-static inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return atomic_sub_return(i, v);
-}
-
-static inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
- return atomic_sub_return_acquire(i, v);
-}
-
-static inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
- return atomic_sub_return_release(i, v);
-}
-
-static inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic_sub_return_relaxed(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_relaxed(i, v);
-}
-
-static inline void
-atomic_long_inc(atomic_long_t *v)
-{
- atomic_inc(v);
-}
-
-static inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
- return atomic_inc_return(v);
-}
-
-static inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
- return atomic_inc_return_acquire(v);
-}
-
-static inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
- return atomic_inc_return_release(v);
-}
-
-static inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
- return atomic_inc_return_relaxed(v);
-}
-
-static inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
- return atomic_fetch_inc(v);
-}
-
-static inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
- return atomic_fetch_inc_acquire(v);
-}
-
-static inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
- return atomic_fetch_inc_release(v);
-}
-
-static inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
- return atomic_fetch_inc_relaxed(v);
-}
-
-static inline void
-atomic_long_dec(atomic_long_t *v)
-{
- atomic_dec(v);
-}
-
-static inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
- return atomic_dec_return(v);
-}
-
-static inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
- return atomic_dec_return_acquire(v);
-}
-
-static inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
- return atomic_dec_return_release(v);
-}
-
-static inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
- return atomic_dec_return_relaxed(v);
-}
-
-static inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
- return atomic_fetch_dec(v);
-}
-
-static inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
- return atomic_fetch_dec_acquire(v);
-}
-
-static inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
- return atomic_fetch_dec_release(v);
-}
-
-static inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
- return atomic_fetch_dec_relaxed(v);
-}
-
-static inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
- atomic_and(i, v);
-}
-
-static inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
- return atomic_fetch_and(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_relaxed(i, v);
-}
-
-static inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
- atomic_andnot(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_relaxed(i, v);
-}
-
-static inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
- atomic_or(i, v);
-}
-
-static inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
- return atomic_fetch_or(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_relaxed(i, v);
-}
-
-static inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
- atomic_xor(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_acquire(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_release(i, v);
-}
-
-static inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_relaxed(i, v);
-}
-
-static inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
- return atomic_xchg(v, i);
-}
-
-static inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
- return atomic_xchg_acquire(v, i);
-}
-
-static inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
- return atomic_xchg_release(v, i);
-}
-
-static inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
- return atomic_xchg_relaxed(v, i);
-}
-
-static inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_acquire(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_release(v, old, new);
-}
-
-static inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_relaxed(v, old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg(v, (int *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_acquire(v, (int *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_release(v, (int *)old, new);
-}
-
-static inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
-}
-
-static inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
- return atomic_sub_and_test(i, v);
-}
-
-static inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
- return atomic_dec_and_test(v);
-}
-
-static inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
- return atomic_inc_and_test(v);
-}
-
-static inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
- return atomic_add_negative(i, v);
-}
-
-static inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic_fetch_add_unless(v, a, u);
-}
-
-static inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic_add_unless(v, a, u);
-}
-
-static inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
- return atomic_inc_not_zero(v);
-}
-
-static inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
- return atomic_inc_unless_negative(v);
-}
-
-static inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
- return atomic_dec_unless_positive(v);
-}
-
-static inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
- return atomic_dec_if_positive(v);
-}
-
-#endif /* CONFIG_64BIT */
-#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// 77558968132ce4f911ad53f6f52ce423006f6268
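The deleted atomic-long.h is a pure width switch: atomic_long_t is atomic64_t on 64-bit builds and atomic_t otherwise, and every atomic_long_*() helper forwards one-to-one to the chosen backend. A hedged stand-alone sketch of that dispatch, using C11 atomics instead of the kernel types (all *_sketch names are invented):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Pick the fixed-width backend that matches 'long', as atomic-long.h did. */
#if ULONG_MAX == 0xffffffffUL
typedef _Atomic int       atomic_long_sketch_t;     /* 32-bit: forward to atomic_t   */
#else
typedef _Atomic long long atomic_long_sketch_t;     /* 64-bit: forward to atomic64_t */
#endif

static long atomic_long_add_return_sketch(long i, atomic_long_sketch_t *v)
{
        /* one thin wrapper around the backend op, nothing more */
        return (long)(atomic_fetch_add(v, i) + i);
}

int main(void)
{
        atomic_long_sketch_t v = 40;

        printf("%ld\n", atomic_long_add_return_sketch(2, &v));  /* prints 42 */
        return 0;
}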
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 286867f593d2..04b8be9f1a77 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Generic C implementation of atomic counter operations. Usable on
- * UP systems only. Do not include in machine independent code.
+ * Generic C implementation of atomic counter operations. Do not include in
+ * machine independent code.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,56 +12,39 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-/*
- * atomic_$op() - $op integer to atomic variable
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
- * smp_mb__{before,after}_atomic().
- */
-
-/*
- * atomic_$op_return() - $op integer to atomic variable and returns the result
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does imply a full memory barrier.
- */
-
#ifdef CONFIG_SMP
/* we can build all atomic primitives from cmpxchg */
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c c_op i; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c; \
@@ -72,7 +55,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#include <linux/irqflags.h>
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -82,7 +65,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -95,7 +78,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -110,89 +93,44 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#endif /* CONFIG_SMP */
-#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
-#endif
-
-#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
-#endif
-#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
-#endif
-
-#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
-#endif
-
-#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
-#endif
-
-#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
-#endif
-
-#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
-#endif
-#ifndef atomic_and
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
ATOMIC_OP(and, &)
-#endif
-
-#ifndef atomic_or
ATOMIC_OP(or, |)
-#endif
-
-#ifndef atomic_xor
ATOMIC_OP(xor, ^)
-#endif
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
+#define arch_atomic_add_return generic_atomic_add_return
+#define arch_atomic_sub_return generic_atomic_sub_return
-#define ATOMIC_INIT(i) { (i) }
+#define arch_atomic_fetch_add generic_atomic_fetch_add
+#define arch_atomic_fetch_sub generic_atomic_fetch_sub
+#define arch_atomic_fetch_and generic_atomic_fetch_and
+#define arch_atomic_fetch_or generic_atomic_fetch_or
+#define arch_atomic_fetch_xor generic_atomic_fetch_xor
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#ifndef atomic_read
-#define atomic_read(v) READ_ONCE((v)->counter)
-#endif
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#include <linux/irqflags.h>
+#define arch_atomic_add generic_atomic_add
+#define arch_atomic_sub generic_atomic_sub
+#define arch_atomic_and generic_atomic_and
+#define arch_atomic_or generic_atomic_or
+#define arch_atomic_xor generic_atomic_xor
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
#endif /* __ASM_GENERIC_ATOMIC_H */
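On SMP, every generic op above is the same compare-and-swap retry loop; only the operator differs. A stand-alone sketch of what ATOMIC_OP_RETURN(add, +) expands to, using GCC's __sync_val_compare_and_swap() as a stand-in for arch_cmpxchg() (generic_add_return_sketch is an invented name):

#include <stdio.h>

/* The cmpxchg retry loop that generic_atomic_add_return() boils down to. */
static int generic_add_return_sketch(int i, int *counter)
{
        int c, old;

        c = *counter;
        while ((old = __sync_val_compare_and_swap(counter, c, c + i)) != c)
                c = old;        /* another CPU won the race; retry with its value */

        return c + i;
}

int main(void)
{
        int counter = 10;

        printf("%d\n", generic_add_return_sketch(5, &counter)); /* prints 15 */
        return 0;
}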
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 370f01d4450f..100d24b02e52 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -15,19 +15,17 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
-extern s64 atomic64_read(const atomic64_t *v);
-extern void atomic64_set(atomic64_t *v, s64 i);
-
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+extern s64 generic_atomic64_read(const atomic64_t *v);
+extern void generic_atomic64_set(atomic64_t *v, s64 i);
#define ATOMIC64_OP(op) \
-extern void atomic64_##op(s64 a, atomic64_t *v);
+extern void generic_atomic64_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
-extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v);
#define ATOMIC64_FETCH_OP(op) \
-extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@@ -46,11 +44,32 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-extern s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
-extern s64 atomic64_xchg(atomic64_t *v, s64 new);
-extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+extern s64 generic_atomic64_dec_if_positive(atomic64_t *v);
+extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
+
+#define arch_atomic64_read generic_atomic64_read
+#define arch_atomic64_set generic_atomic64_set
+#define arch_atomic64_set_release generic_atomic64_set
+
+#define arch_atomic64_add generic_atomic64_add
+#define arch_atomic64_add_return generic_atomic64_add_return
+#define arch_atomic64_fetch_add generic_atomic64_fetch_add
+#define arch_atomic64_sub generic_atomic64_sub
+#define arch_atomic64_sub_return generic_atomic64_sub_return
+#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub
+
+#define arch_atomic64_and generic_atomic64_and
+#define arch_atomic64_fetch_and generic_atomic64_fetch_and
+#define arch_atomic64_or generic_atomic64_or
+#define arch_atomic64_fetch_or generic_atomic64_fetch_or
+#define arch_atomic64_xor generic_atomic64_xor
+#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor
+
+#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive
+#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg
+#define arch_atomic64_xchg generic_atomic64_xchg
+#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
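The generic_atomic64_*() routines are provided out of line (lib/atomic64.c), serializing each 64-bit counter with a lock for 32-bit machines that lack native 64-bit atomics. A rough user-space sketch of that approach, substituting a pthread mutex for the kernel's small hash of spinlocks (all *_sketch names are invented):

#include <pthread.h>
#include <stdio.h>

typedef struct {
        long long counter;
        pthread_mutex_t lock;   /* the kernel hashes the counter address to one of a few spinlocks */
} atomic64_sketch_t;

static long long generic_atomic64_add_return_sketch(long long a, atomic64_sketch_t *v)
{
        long long val;

        pthread_mutex_lock(&v->lock);
        v->counter += a;
        val = v->counter;
        pthread_mutex_unlock(&v->lock);

        return val;
}

int main(void)
{
        atomic64_sketch_t v = { 0, PTHREAD_MUTEX_INITIALIZER };

        printf("%lld\n", generic_atomic64_add_return_sketch(7, &v)); /* prints 7 */
        return 0;
}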
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 85b28eb80b11..961f4d88f9ef 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -14,12 +14,43 @@
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
+#include <asm/rwonce.h>
#ifndef nop
#define nop() asm volatile ("nop")
#endif
/*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb() do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_mb
+#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
+/*
* Force strict CPU ordering. And yes, this is required on UP too when we're
* talking to devices.
*
@@ -38,6 +69,10 @@
#define wmb() mb()
#endif
+#ifndef dma_mb
+#define dma_mb() mb()
+#endif
+
#ifndef dma_rmb
#define dma_rmb() rmb()
#endif
@@ -46,10 +81,6 @@
#define dma_wmb() wmb()
#endif
-#ifndef read_barrier_depends
-#define read_barrier_depends() do { } while (0)
-#endif
-
#ifndef __smp_mb
#define __smp_mb() mb()
#endif
@@ -62,26 +93,18 @@
#define __smp_wmb() wmb()
#endif
-#ifndef __smp_read_barrier_depends
-#define __smp_read_barrier_depends() read_barrier_depends()
-#endif
-
#ifdef CONFIG_SMP
#ifndef smp_mb
-#define smp_mb() __smp_mb()
+#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#endif
#ifndef smp_rmb
-#define smp_rmb() __smp_rmb()
+#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif
#ifndef smp_wmb
-#define smp_wmb() __smp_wmb()
-#endif
-
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() __smp_read_barrier_depends()
+#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif
#else /* !CONFIG_SMP */
@@ -98,10 +121,6 @@
#define smp_wmb() barrier()
#endif
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() do { } while (0)
-#endif
-
#endif /* CONFIG_SMP */
#ifndef __smp_store_mb
@@ -128,29 +147,29 @@ do { \
#ifndef __smp_load_acquire
#define __smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
+ __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
__smp_mb(); \
- ___p1; \
+ (typeof(*p))___p1; \
})
#endif
#ifdef CONFIG_SMP
#ifndef smp_store_mb
-#define smp_store_mb(var, value) __smp_store_mb(var, value)
+#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif
#ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic() __smp_mb__before_atomic()
+#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif
#ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic() __smp_mb__after_atomic()
+#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif
#ifndef smp_store_release
-#define smp_store_release(p, v) __smp_store_release(p, v)
+#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif
#ifndef smp_load_acquire
@@ -183,24 +202,23 @@ do { \
#ifndef smp_load_acquire
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
+ __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
- ___p1; \
+ (typeof(*p))___p1; \
})
#endif
#endif /* CONFIG_SMP */
/* Barriers for virtual machine guests when talking to an SMP host */
-#define virt_mb() __smp_mb()
-#define virt_rmb() __smp_rmb()
-#define virt_wmb() __smp_wmb()
-#define virt_read_barrier_depends() __smp_read_barrier_depends()
-#define virt_store_mb(var, value) __smp_store_mb(var, value)
-#define virt_mb__before_atomic() __smp_mb__before_atomic()
-#define virt_mb__after_atomic() __smp_mb__after_atomic()
-#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)
/**
@@ -229,14 +247,14 @@ do { \
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
typeof(ptr) __PTR = (ptr); \
- typeof(*ptr) VAL; \
+ __unqual_scalar_typeof(*ptr) VAL; \
for (;;) { \
VAL = READ_ONCE(*__PTR); \
if (cond_expr) \
break; \
cpu_relax(); \
} \
- VAL; \
+ (typeof(*ptr))VAL; \
})
#endif
@@ -250,12 +268,33 @@ do { \
*/
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({ \
- typeof(*ptr) _val; \
+ __unqual_scalar_typeof(*ptr) _val; \
_val = smp_cond_load_relaxed(ptr, cond_expr); \
smp_acquire__after_ctrl_dep(); \
- _val; \
+ (typeof(*ptr))_val; \
})
#endif
+/*
+ * pmem_wmb() ensures that all stores to persistent storage issued by
+ * preceding instructions have reached persistent storage before any
+ * data access or data transfer caused by subsequent instructions is
+ * initiated.
+ */
+#ifndef pmem_wmb
+#define pmem_wmb() wmb()
+#endif
+
+/*
+ * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
+ * this kind of memory access, the CPU may wait for prior accesses to be
+ * merged with subsequent ones. In some situations, this wait hurts
+ * performance. io_stop_wc() can be used to prevent the merging of
+ * write-combining memory accesses before this macro with those after it.
+ */
+#ifndef io_stop_wc
+#define io_stop_wc() do { } while (0)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
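smp_cond_load_relaxed()/smp_cond_load_acquire() package the common "spin until this location satisfies a condition" idiom, with the acquire variant adding smp_acquire__after_ctrl_dep() at the end, as the hunk above shows. A rough C11 sketch of the same shape, waiting for a flag (wait_for_ready() and the memory_order calls are stand-ins, not the kernel macros):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int ready;

/* Roughly what smp_cond_load_acquire(&ready, VAL == 1) expands to. */
static int wait_for_ready(void)
{
        int VAL;

        for (;;) {
                VAL = atomic_load_explicit(&ready, memory_order_relaxed);
                if (VAL == 1)
                        break;
                /* cpu_relax() would sit here in the kernel loop */
        }
        atomic_thread_fence(memory_order_acquire);  /* smp_acquire__after_ctrl_dep() */

        return VAL;
}

int main(void)
{
        atomic_store(&ready, 1);           /* normally done by another thread */
        printf("%d\n", wait_for_ready());  /* prints 1 */
        return 0;
}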
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index bfc96bf6606e..a47b8a71d6fe 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -4,8 +4,9 @@
/*
* For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents. You should
- * recode these in the native assembly language, if at all possible.
+ * architecture, here are some C-language equivalents. They should
+ * generate reasonable code, so take a look at what your compiler spits
+ * out before rolling your own buggy implementation in assembly language.
*
* C language equivalents written by Theodore Ts'o, 9/26/92
*/
@@ -19,7 +20,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index dd90c9792909..71ab4ba9c25d 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -11,58 +11,60 @@
* See Documentation/atomic_bitops.txt for details.
*/
-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_set_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_change_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (READ_ONCE(*p) & mask)
- return 1;
-
- old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
return !!(old & mask);
}
-static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (!(READ_ONCE(*p) & mask))
- return 0;
-
- old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
return !!(old & mask);
}
-static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
return !!(old & mask);
}
+#include <asm-generic/bitops/instrumented-atomic.h>
+
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
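arch_test_and_set_bit() above reduces to "fetch_or the mask into the word that holds the bit, then test the old value". A stand-alone sketch of that construction, with GCC's __atomic_fetch_or() standing in for arch_atomic_long_fetch_or() (test_and_set_bit_sketch is an invented name):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))

static bool test_and_set_bit_sketch(unsigned int nr, unsigned long *p)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long old;

        p += BIT_WORD(nr);                                   /* word that holds bit nr */
        old = __atomic_fetch_or(p, mask, __ATOMIC_SEQ_CST);  /* arch_atomic_long_fetch_or() */

        return !!(old & mask);
}

int main(void)
{
        unsigned long map[2] = { 0, 0 };

        printf("%d\n", test_and_set_bit_sketch(70, map));    /* 0: bit was clear   */
        printf("%d\n", test_and_set_bit_sketch(70, map));    /* 1: bit already set */
        return 0;
}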
diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h
index 458c85ebcd15..7b129329046b 100644
--- a/include/asm-generic/bitops/builtin-ffs.h
+++ b/include/asm-generic/bitops/builtin-ffs.h
@@ -8,11 +8,8 @@
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from ffz (man ffs).
*/
-static __always_inline int ffs(int x)
-{
- return __builtin_ffs(x);
-}
+#define ffs(x) __builtin_ffs(x)
#endif
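ffs() keeps the libc convention: bit positions are 1-based and ffs(0) returns 0, which is exactly what __builtin_ffs() provides, so the wrapper function can become a plain define. A quick illustration:

#include <stdio.h>

int main(void)
{
        printf("%d\n", __builtin_ffs(0));     /* 0: no bit set     */
        printf("%d\n", __builtin_ffs(1));     /* 1: bit 0, 1-based */
        printf("%d\n", __builtin_ffs(0x80));  /* 8: bit 7, 1-based */
        return 0;
}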
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
index e81868b2c0f0..323fd5d6ae26 100644
--- a/include/asm-generic/bitops/ffs.h
+++ b/include/asm-generic/bitops/ffs.h
@@ -8,7 +8,7 @@
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from ffz (man ffs).
*/
static inline int ffs(int x)
{
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
deleted file mode 100644
index 9fdf21302fdf..000000000000
--- a/include/asm-generic/bitops/find.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_BITOPS_FIND_H_
-#define _ASM_GENERIC_BITOPS_FIND_H_
-
-#ifndef find_next_bit
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
- size, unsigned long offset);
-#endif
-
-#ifndef find_next_and_bit
-/**
- * find_next_and_bit - find the next set bit in both memory regions
- * @addr1: The first address to base the search on
- * @addr2: The second address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-extern unsigned long find_next_and_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long size,
- unsigned long offset);
-#endif
-
-#ifndef find_next_zero_bit
-/**
- * find_next_zero_bit - find the next cleared bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number of the next zero bit
- * If no bits are zero, returns @size.
- */
-extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
- long size, unsigned long offset);
-#endif
-
-#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first set bit.
- * If no bits are set, returns @size.
- */
-extern unsigned long find_first_bit(const unsigned long *addr,
- unsigned long size);
-
-/**
- * find_first_zero_bit - find the first cleared bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first cleared bit.
- * If no bits are zero, returns @size.
- */
-extern unsigned long find_first_zero_bit(const unsigned long *addr,
- unsigned long size);
-#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-#ifndef find_first_bit
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
-#endif
-#ifndef find_first_zero_bit
-#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
-#endif
-
-#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-/**
- * find_next_clump8 - find next 8-bit clump with set bits in a memory region
- * @clump: location to store copy of found clump
- * @addr: address to base the search on
- * @size: bitmap size in number of bits
- * @offset: bit offset at which to start searching
- *
- * Returns the bit offset for the next set clump; the found clump value is
- * copied to the location pointed by @clump. If no bits are set, returns @size.
- */
-extern unsigned long find_next_clump8(unsigned long *clump,
- const unsigned long *addr,
- unsigned long size, unsigned long offset);
-
-#define find_first_clump8(clump, bits, size) \
- find_next_clump8((clump), (bits), (size), 0)
-
-#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
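The find_*_bit() declarations above moved out of asm-generic and now live alongside the generic bitmap code. Their contract is unchanged: scan from @offset, return the index of the first matching bit, or @size if there is none. A naive stand-alone sketch of find_next_bit() honouring that contract (the real implementation works a word at a time; find_next_bit_sketch is an invented name):

#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))

static unsigned long find_next_bit_sketch(const unsigned long *addr,
                                          unsigned long size, unsigned long offset)
{
        for (; offset < size; offset++)
                if (addr[offset / BITS_PER_LONG] & (1UL << (offset % BITS_PER_LONG)))
                        return offset;

        return size;    /* no set bit found: return @size, as documented above */
}

int main(void)
{
        unsigned long map[1] = { 0x90 };                    /* bits 4 and 7 set */

        printf("%lu\n", find_next_bit_sketch(map, 64, 0));  /* 4  */
        printf("%lu\n", find_next_bit_sketch(map, 64, 5));  /* 7  */
        printf("%lu\n", find_next_bit_sketch(map, 64, 8));  /* 64 */
        return 0;
}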
diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h
new file mode 100644
index 000000000000..564a8c675d85
--- /dev/null
+++ b/include/asm-generic/bitops/generic-non-atomic.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+
+#include <linux/bits.h>
+#include <asm/barrier.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+/*
+ * Generic definitions for bit operations, should not be used in regular code
+ * directly.
+ */
+
+/**
+ * generic___set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+static __always_inline void
+generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * generic___change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * generic___test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic___test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/* WARNING: non-atomic and can be reordered! */
+static __always_inline bool
+generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ /*
+ * Unlike the bitops with the '__' prefix above, this one *is* atomic,
+ * so `volatile` must always stay here with no cast-aways. See
+ * `Documentation/atomic_bitops.txt` for the details.
+ */
+ return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+/**
+ * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
+/*
+ * const_*() definitions provide good compile-time optimizations when
+ * the passed arguments can be resolved at compile time.
+ */
+#define const___set_bit generic___set_bit
+#define const___clear_bit generic___clear_bit
+#define const___change_bit generic___change_bit
+#define const___test_and_set_bit generic___test_and_set_bit
+#define const___test_and_clear_bit generic___test_and_clear_bit
+#define const___test_and_change_bit generic___test_and_change_bit
+#define const_test_bit_acquire generic_test_bit_acquire
+
+/**
+ * const_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ *
+ * A version of generic_test_bit() which discards the `volatile` qualifier to
+ * allow a compiler to optimize code harder. Non-atomic and to be called only
+ * for testing compile-time constants, e.g. by the corresponding macros, not
+ * directly from "regular" code.
+ */
+static __always_inline bool
+const_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long val = *p;
+
+ return !!(val & mask);
+}
+
+#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
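The const_*() aliases exist so that a dispatching macro can send compile-time-constant arguments to the non-volatile variants, which the compiler is free to fold away; the real selector lives with the linux/bitops.h wrappers and checks more conditions than shown here. A simplified, hedged sketch of that kind of dispatch (test_bit_sketch and its selector condition are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))

static inline bool generic_test_bit_sketch(unsigned long nr, const volatile unsigned long *addr)
{
        return 1UL & (addr[BIT_WORD(nr)] >> (nr % BITS_PER_LONG));
}

static inline bool const_test_bit_sketch(unsigned long nr, const volatile unsigned long *addr)
{
        const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);

        return !!(*p & BIT_MASK(nr));   /* no volatile: the compiler may fold this */
}

/* Simplified selector: constant arguments go to the foldable variant. */
#define test_bit_sketch(nr, addr)                                            \
        (__builtin_constant_p(nr) && __builtin_constant_p(*(addr)) ?         \
         const_test_bit_sketch(nr, addr) : generic_test_bit_sketch(nr, addr))

int main(void)
{
        static const unsigned long map[1] = { 0x10 };        /* bit 4 set */

        printf("%d\n", test_bit_sketch(4, map));              /* 1 */
        printf("%d\n", test_bit_sketch(5, map));              /* 0 */
        return 0;
}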
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 18ce3c9e8eec..4225a8ca9c1a 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* set_bit - Atomically set a bit in memory
@@ -23,9 +23,9 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
}
@@ -36,9 +36,9 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
*
* This is a relaxed atomic operation (no implied memory barriers).
*/
-static inline void clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
}
@@ -52,9 +52,9 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
}
@@ -65,9 +65,10 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -78,9 +79,10 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -91,9 +93,10 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index ec53fdeea9ec..eb64bd4f11f3 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -22,7 +22,8 @@
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_release();
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
@@ -37,7 +38,8 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_release();
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
@@ -52,7 +54,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
@@ -71,7 +73,8 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
static inline bool
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_release();
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_clear_bit_unlock_is_negative_byte(nr, addr);
}
/* Let everybody know we have it. */
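test_and_set_bit_lock() and clear_bit_unlock() form an acquire/release pair, which is why the unlock paths above also call kcsan_release(). A rough C11 sketch of using one bit of a word as a tiny lock in that style (the *_sketch functions are invented stand-ins, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LOCK_BIT 0UL

static _Atomic unsigned long flags;

static bool test_and_set_bit_lock_sketch(unsigned long nr, _Atomic unsigned long *p)
{
        unsigned long mask = 1UL << nr;

        /* acquire ordering on success, like test_and_set_bit_lock() */
        return atomic_fetch_or_explicit(p, mask, memory_order_acquire) & mask;
}

static void clear_bit_unlock_sketch(unsigned long nr, _Atomic unsigned long *p)
{
        /* release ordering, like clear_bit_unlock() */
        atomic_fetch_and_explicit(p, ~(1UL << nr), memory_order_release);
}

int main(void)
{
        while (test_and_set_bit_lock_sketch(LOCK_BIT, &flags))
                ;                                /* spin until we own the bit */
        puts("locked");                          /* critical section */
        clear_bit_unlock_sketch(LOCK_BIT, &flags);
        puts("unlocked");
        return 0;
}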
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 95ff28d128a1..2b238b161a62 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -11,10 +11,10 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
- * __set_bit - Set a bit in memory
+ * ___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -22,14 +22,15 @@
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___set_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
/**
- * __clear_bit - Clears a bit in memory
+ * ___clear_bit - Clears a bit in memory
* @nr: the bit to clear
* @addr: the address to start counting from
*
@@ -37,14 +38,15 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
/**
- * __change_bit - Toggle a bit in memory
+ * ___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
@@ -52,63 +54,104 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___change_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
+static __always_inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
+{
+ if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
+ /*
+ * We treat non-atomic read-write bitops a little more special.
+ * Given the operations here only modify a single bit, assuming
+ * non-atomicity of the writer is sufficient may be reasonable
+ * for certain usage (and follows the permissible nature of the
+ * assume-plain-writes-atomic rule):
+ * 1. report read-modify-write races -> check read;
+ * 2. do not report races with marked readers, but do report
+ * races with unmarked readers -> check "atomic" write.
+ */
+ kcsan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ /*
+ * Use generic write instrumentation, in case other sanitizers
+ * or tools are enabled alongside KCSAN.
+ */
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ } else {
+ instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+ }
+}
+
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * ___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_set_bit(nr, addr);
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * ___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_clear_bit(nr, addr);
}
/**
- * __test_and_change_bit - Change a bit and return its old value
+ * ___test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_change_bit(nr, addr);
}
/**
- * test_bit - Determine whether a bit is set
+ * _test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static inline bool test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool
+_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
- kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
+/**
+ * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_bit_acquire(nr, addr);
+}
+
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
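For illustration (not part of this patch): drivers do not call the triple-underscore helpers above directly; the public __set_bit()/__test_and_set_bit()/test_bit() names are mapped onto them in <linux/bitops.h>. A simplified sketch of that mapping, leaving out the constant-folding logic the real header has:

/* Simplified illustration only; see <linux/bitops.h> for the real macros. */
#define __set_bit(nr, addr)             ___set_bit(nr, addr)
#define __test_and_set_bit(nr, addr)    ___test_and_set_bit(nr, addr)
#define test_bit(nr, addr)              _test_bit(nr, addr)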
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 188d3eba3ace..d51beff60375 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -9,46 +9,12 @@
#define BITOP_LE_SWIZZLE 0
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- return find_next_zero_bit(addr, size, offset);
-}
-
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- return find_next_bit(addr, size, offset);
-}
-
-static inline unsigned long find_first_zero_bit_le(const void *addr,
- unsigned long size)
-{
- return find_first_zero_bit(addr, size);
-}
-
#elif defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-#ifndef find_next_zero_bit_le
-extern unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset);
-#endif
-
-#ifndef find_next_bit_le
-extern unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset);
#endif
-#ifndef find_first_zero_bit_le
-#define find_first_zero_bit_le(addr, size) \
- find_next_zero_bit_le((addr), (size), 0)
-#endif
-
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
static inline int test_bit_le(int nr, const void *addr)
{
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 3ae021368f48..630f2f6b9595 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -7,7 +7,7 @@
#include <asm/barrier.h>
/**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
@@ -15,8 +15,8 @@
* the returned value is 0.
* It can be used to implement bit locks.
*/
-static inline int test_and_set_bit_lock(unsigned int nr,
- volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
@@ -25,26 +25,27 @@ static inline int test_and_set_bit_lock(unsigned int nr,
if (READ_ONCE(*p) & mask)
return 1;
- old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
return !!(old & mask);
}
/**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
-static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}
/**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -54,38 +55,40 @@ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
*
* See for example x86's implementation.
*/
-static inline void __clear_bit_unlock(unsigned int nr,
- volatile unsigned long *p)
+static inline void
+arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
unsigned long old;
p += BIT_WORD(nr);
old = READ_ONCE(*p);
old &= ~BIT_MASK(nr);
- atomic_long_set_release((atomic_long_t *)p, old);
+ arch_atomic_long_set_release((atomic_long_t *)p, old);
}
/**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- * byte is negative, for unlock.
+ * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
* @nr: the bit to clear
* @addr: the address to start counting from
*
* This is a bit of a one-trick-pony for the filemap code, which clears
* PG_locked and tests PG_waiters.
*/
-#ifndef clear_bit_unlock_is_negative_byte
-static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
- volatile unsigned long *p)
+#ifndef arch_clear_bit_unlock_is_negative_byte
+static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
+ volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
return !!(old & BIT(7));
}
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
#endif
+#include <asm-generic/bitops/instrumented-lock.h>
+
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
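As a usage illustration (hypothetical code, not part of this patch), the lock-flavoured bitops above are the building blocks of a simple bit lock; a minimal sketch, assuming a caller-owned flags word and a made-up MY_LOCK_BIT:

/* Hypothetical example: serialise access with one bit of a flags word. */
#define MY_LOCK_BIT     0

static void my_lock(unsigned long *flags)
{
        while (test_and_set_bit_lock(MY_LOCK_BIT, flags))
                cpu_relax();                    /* spin until the bit was observed clear */
}

static void my_unlock(unsigned long *flags)
{
        clear_bit_unlock(MY_LOCK_BIT, flags);   /* release semantics */
}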
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
index 7e10c4b50c5d..71f8d54a5195 100644
--- a/include/asm-generic/bitops/non-atomic.h
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -2,108 +2,19 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
-#include <asm/types.h>
+#include <asm-generic/bitops/generic-non-atomic.h>
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+#define arch___set_bit generic___set_bit
+#define arch___clear_bit generic___clear_bit
+#define arch___change_bit generic___change_bit
- *p |= mask;
-}
+#define arch___test_and_set_bit generic___test_and_set_bit
+#define arch___test_and_clear_bit generic___test_and_clear_bit
+#define arch___test_and_change_bit generic___test_and_change_bit
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
- *p &= ~mask;
-}
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-
- *p ^= mask;
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old | mask;
- return (old & mask) != 0;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old & ~mask;
- return (old & mask) != 0;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old ^ mask;
- return (old & mask) != 0;
-}
-
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
-{
- return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
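The generic_*() implementations that replace the removed open-coded versions all rely on the BIT_WORD()/BIT_MASK() split from <linux/bits.h>; a short worked example of the addressing, assuming BITS_PER_LONG == 64:

/*
 * BIT_WORD(nr) == nr / BITS_PER_LONG, BIT_MASK(nr) == 1UL << (nr % BITS_PER_LONG).
 * For nr == 68 on a 64-bit machine:
 *   BIT_WORD(68) == 1          -> second word of the array
 *   BIT_MASK(68) == 1UL << 4   -> bit 4 within that word
 * so generic___set_bit(68, addr) effectively performs: addr[1] |= 1UL << 4;
 */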
diff --git a/include/asm-generic/bitops/non-instrumented-non-atomic.h b/include/asm-generic/bitops/non-instrumented-non-atomic.h
new file mode 100644
index 000000000000..0ddc78dfc358
--- /dev/null
+++ b/include/asm-generic/bitops/non-instrumented-non-atomic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+
+#define ___set_bit arch___set_bit
+#define ___clear_bit arch___clear_bit
+#define ___change_bit arch___change_bit
+
+#define ___test_and_set_bit arch___test_and_set_bit
+#define ___test_and_clear_bit arch___test_and_clear_bit
+#define ___test_and_change_bit arch___test_and_change_bit
+
+#define _test_bit arch_test_bit
+#define _test_bit_acquire arch_test_bit_acquire
+
+#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
index 3905c1c93dc2..1023e2a4bd37 100644
--- a/include/asm-generic/bitsperlong.h
+++ b/include/asm-generic/bitsperlong.h
@@ -23,4 +23,16 @@
#define BITS_PER_LONG_LONG 64
#endif
+/*
+ * small_const_nbits(n) is true precisely when it is known at compile-time
+ * that BITMAP_SIZE(n) is 1, i.e. 1 <= n <= BITS_PER_LONG. This allows
+ * various bit/bitmap APIs to provide a fast inline implementation. Bitmaps
+ * of size 0 are very rare, and a compile-time-known-size 0 is most likely
+ * a sign of error. They will be handled correctly by the bit/bitmap APIs,
+ * but using the out-of-line functions, so that the inline implementations
+ * can unconditionally dereference the pointer(s).
+ */
+#define small_const_nbits(nbits) \
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
+
#endif /* __ASM_GENERIC_BITS_PER_LONG */
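A sketch of how a bitmap helper can use small_const_nbits() for the single-word fast path (modelled loosely on the core bitmap code, not copied from it; the helper name is made up, and BITS_TO_LONGS()/memset() come from the usual core headers):

static __always_inline void example_bitmap_zero(unsigned long *dst, unsigned int nbits)
{
        if (small_const_nbits(nbits))
                *dst = 0UL;     /* exactly one word, known at compile time */
        else
                memset(dst, 0, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}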
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 384b5c835ced..4050b191e1a9 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -3,6 +3,8 @@
#define _ASM_GENERIC_BUG_H
#include <linux/compiler.h>
+#include <linux/instrumentation.h>
+#include <linux/once_lite.h>
#define CUT_HERE "------------[ cut here ]------------\n"
@@ -16,7 +18,14 @@
#endif
#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
+#include <linux/panic.h>
+#include <linux/printk.h>
+
+struct warn_args;
+struct pt_regs;
+
+void __warn(const char *file, int line, void *caller, unsigned taint,
+ struct pt_regs *regs, struct warn_args *args);
#ifdef CONFIG_BUG
@@ -83,14 +92,19 @@ extern __printf(4, 5)
void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
const char *fmt, ...);
#define __WARN() __WARN_printf(TAINT_WARN, NULL)
-#define __WARN_printf(taint, arg...) \
- warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
+#define __WARN_printf(taint, arg...) do { \
+ instrumentation_begin(); \
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
+ instrumentation_end(); \
+ } while (0)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
#define __WARN_printf(taint, arg...) do { \
+ instrumentation_begin(); \
__warn_printk(arg); \
__WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ instrumentation_end(); \
} while (0)
#define WARN_ON_ONCE(condition) ({ \
int __ret_warn_on = !!(condition); \
@@ -102,11 +116,6 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#endif
/* used internally by panic.c */
-struct warn_args;
-struct pt_regs;
-
-void __warn(const char *file, int line, void *caller, unsigned taint,
- struct pt_regs *regs, struct warn_args *args);
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
@@ -134,39 +143,15 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
})
#ifndef WARN_ON_ONCE
-#define WARN_ON_ONCE(condition) ({ \
- static bool __section(.data.once) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN_ON(1); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_ON_ONCE(condition) \
+ DO_ONCE_LITE_IF(condition, WARN_ON, 1)
#endif
-#define WARN_ONCE(condition, format...) ({ \
- static bool __section(.data.once) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN(1, format); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_ONCE(condition, format...) \
+ DO_ONCE_LITE_IF(condition, WARN, 1, format)
-#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
- static bool __section(.data.once) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN_TAINT(1, taint, format); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_TAINT_ONCE(condition, taint, format...) \
+ DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format)
#else /* !CONFIG_BUG */
#ifndef HAVE_ARCH_BUG
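Caller-visible behaviour is unchanged by the DO_ONCE_LITE_IF() conversion; typical usage still looks like the hypothetical driver fragment below, which warns at most once per boot (all names are illustrative):

        if (WARN_ON_ONCE(len > MAX_BUF_LEN))    /* MAX_BUF_LEN is made up */
                return -EINVAL;

        WARN_ONCE(retries > 3, "device %d needed %d retries\n", dev_id, retries);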
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index cac7404b2bdd..f46258d1a080 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_CACHEFLUSH_H
-#define __ASM_CACHEFLUSH_H
+#ifndef _ASM_GENERIC_CACHEFLUSH_H
+#define _ASM_GENERIC_CACHEFLUSH_H
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
+#include <linux/instrumented.h>
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
/*
* The cache doesn't need to be flushed when TLB entries change when
@@ -45,10 +47,12 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
}
#endif
-#ifndef flush_dcache_page
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif
#ifndef flush_dcache_mmap_lock
@@ -69,6 +73,10 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
}
#endif
+#ifndef flush_icache_user_range
+#define flush_icache_user_range flush_icache_range
+#endif
+
#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
@@ -76,8 +84,8 @@ static inline void flush_icache_page(struct vm_area_struct *vma,
}
#endif
-#ifndef flush_icache_user_range
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
+#ifndef flush_icache_user_page
+static inline void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page,
unsigned long addr, int len)
{
@@ -99,14 +107,22 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
+ instrument_copy_to_user((void __user *)dst, src, len); \
memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
+ flush_icache_user_page(vma, page, vaddr, len); \
} while (0)
#endif
+
#ifndef copy_from_user_page
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ instrument_copy_from_user_before(dst, (void __user *)src, \
+ len); \
+ memcpy(dst, src, len); \
+ instrument_copy_from_user_after(dst, (void __user *)src, len, \
+ 0); \
+ } while (0)
#endif
-#endif /* __ASM_CACHEFLUSH_H */
+#endif /* _ASM_GENERIC_CACHEFLUSH_H */
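With the guard changed from "#ifndef flush_dcache_page" to "#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE", an architecture that supplies its own implementation is expected to define the macro itself; a hedged sketch of what a hypothetical arch header might do:

/* Hypothetical arch/<arch>/include/asm/cacheflush.h fragment. */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);

#include <asm-generic/cacheflush.h>     /* fills in the remaining defaults */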
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index 34785c0f57b0..43e18db89c14 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -16,29 +16,6 @@
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *csum_err);
-
-#ifndef csum_partial_copy_nocheck
-#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy((src), (dst), (len), (sum))
-#endif
-
#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index f17f14f84d09..380cdc824e4b 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -12,7 +12,7 @@ extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
* Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
* long parameter, supporting various types of architectures.
*/
-static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
unsigned long old, unsigned long new, int size)
{
unsigned long flags, prev;
@@ -51,7 +51,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
/*
* Generic version of __cmpxchg64_local. Takes an u64 parameter.
*/
-static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+static inline u64 __generic_cmpxchg64_local(volatile void *ptr,
u64 old, u64 new)
{
u64 prev;
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 9a24510cd8c1..dca4419922a9 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -14,16 +14,14 @@
#include <linux/types.h>
#include <linux/irqflags.h>
-#ifndef xchg
-
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalidly-sized xchg().
*/
-extern void __xchg_called_with_bad_pointer(void);
+extern void __generic_xchg_called_with_bad_pointer(void);
static inline
-unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
{
unsigned long ret, flags;
@@ -75,35 +73,43 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#endif /* CONFIG_64BIT */
default:
- __xchg_called_with_bad_pointer();
+ __generic_xchg_called_with_bad_pointer();
return x;
}
}
-#define xchg(ptr, x) ({ \
- ((__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+#define generic_xchg(ptr, x) ({ \
+ ((__typeof__(*(ptr))) \
+ __generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})
-#endif /* xchg */
-
/*
* Atomic compare and exchange.
*/
#include <asm-generic/cmpxchg-local.h>
-#ifndef cmpxchg_local
-#define cmpxchg_local(ptr, o, n) ({ \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr)))); \
+#define generic_cmpxchg_local(ptr, o, n) ({ \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)))); \
})
+
+#define generic_cmpxchg64_local(ptr, o, n) \
+ __generic_cmpxchg64_local((ptr), (o), (n))
+
+
+#ifndef arch_xchg
+#define arch_xchg generic_xchg
+#endif
+
+#ifndef arch_cmpxchg_local
+#define arch_cmpxchg_local generic_cmpxchg_local
#endif
-#ifndef cmpxchg64_local
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#ifndef arch_cmpxchg64_local
+#define arch_cmpxchg64_local generic_cmpxchg64_local
#endif
-#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg arch_cmpxchg_local
+#define arch_cmpxchg64 arch_cmpxchg64_local
#endif /* __ASM_GENERIC_CMPXCHG_H */
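The renamed helpers are only the arch_ fallbacks; ordinary kernel code keeps calling xchg()/cmpxchg() from <linux/atomic.h>, which wrap the arch_ variants with instrumentation. A minimal usage sketch with made-up names:

        /* Hypothetical: atomically take the pending value and observe a state change. */
        unsigned long old = xchg(&st->pending, 0UL);

        if (cmpxchg(&st->state, OLD_STATE, NEW_STATE) == OLD_STATE)
                pr_debug("state transition succeeded, pending was %lu\n", old);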
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index a86f65bffab8..8392caea398f 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -2,6 +2,30 @@
#ifndef __ASM_GENERIC_COMPAT_H
#define __ASM_GENERIC_COMPAT_H
+#ifndef COMPAT_USER_HZ
+#define COMPAT_USER_HZ 100
+#endif
+
+#ifndef COMPAT_RLIM_INFINITY
+#define COMPAT_RLIM_INFINITY 0xffffffff
+#endif
+
+#ifndef COMPAT_OFF_T_MAX
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#endif
+
+#ifndef compat_arg_u64
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#define compat_arg_u64(name) u32 name##_lo, u32 name##_hi
+#define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi
+#else
+#define compat_arg_u64(name) u32 name##_hi, u32 name##_lo
+#define compat_arg_u64_dual(name) u32, name##_hi, u32, name##_lo
+#endif
+#define compat_arg_u64_glue(name) (((u64)name##_lo & 0xffffffffUL) | \
+ ((u64)name##_hi << 32))
+#endif /* compat_arg_u64 */
+
/* These types are common across all compat ABIs */
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -20,6 +44,125 @@ typedef u16 compat_ushort_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u32 compat_uptr_t;
+typedef u32 compat_caddr_t;
typedef u32 compat_aio_context_t;
+typedef u32 compat_old_sigset_t;
+
+#ifndef __compat_uid_t
+typedef u32 __compat_uid_t;
+typedef u32 __compat_gid_t;
+#endif
+
+#ifndef __compat_uid32_t
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
+#endif
+
+#ifndef compat_mode_t
+typedef u32 compat_mode_t;
+#endif
+
+#ifdef CONFIG_COMPAT_FOR_U64_ALIGNMENT
+typedef s64 __attribute__((aligned(4))) compat_s64;
+typedef u64 __attribute__((aligned(4))) compat_u64;
+#else
+typedef s64 compat_s64;
+typedef u64 compat_u64;
+#endif
+
+#ifndef _COMPAT_NSIG
+typedef u32 compat_sigset_word;
+#define _COMPAT_NSIG _NSIG
+#define _COMPAT_NSIG_BPW 32
+#endif
+
+#ifndef compat_dev_t
+typedef u32 compat_dev_t;
+#endif
+
+#ifndef compat_ipc_pid_t
+typedef s32 compat_ipc_pid_t;
+#endif
+
+#ifndef compat_fsid_t
+typedef __kernel_fsid_t compat_fsid_t;
+#endif
+
+#ifndef compat_statfs
+struct compat_statfs {
+ compat_int_t f_type;
+ compat_int_t f_bsize;
+ compat_int_t f_blocks;
+ compat_int_t f_bfree;
+ compat_int_t f_bavail;
+ compat_int_t f_files;
+ compat_int_t f_ffree;
+ compat_fsid_t f_fsid;
+ compat_int_t f_namelen;
+ compat_int_t f_frsize;
+ compat_int_t f_flags;
+ compat_int_t f_spare[4];
+};
+#endif
+
+#ifndef compat_ipc64_perm
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid32_t uid;
+ __compat_gid32_t gid;
+ __compat_uid32_t cuid;
+ __compat_gid32_t cgid;
+ compat_mode_t mode;
+ unsigned char __pad1[4 - sizeof(compat_mode_t)];
+ compat_ushort_t seq;
+ compat_ushort_t __pad2;
+ compat_ulong_t unused1;
+ compat_ulong_t unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ compat_ulong_t sem_otime;
+ compat_ulong_t sem_otime_high;
+ compat_ulong_t sem_ctime;
+ compat_ulong_t sem_ctime_high;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused3;
+ compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ compat_ulong_t msg_stime;
+ compat_ulong_t msg_stime_high;
+ compat_ulong_t msg_rtime;
+ compat_ulong_t msg_rtime_high;
+ compat_ulong_t msg_ctime;
+ compat_ulong_t msg_ctime_high;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ compat_size_t shm_segsz;
+ compat_ulong_t shm_atime;
+ compat_ulong_t shm_atime_high;
+ compat_ulong_t shm_dtime;
+ compat_ulong_t shm_dtime_high;
+ compat_ulong_t shm_ctime;
+ compat_ulong_t shm_ctime_high;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+#endif
#endif
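The compat_arg_u64*() helpers let a 64-bit syscall argument that arrives split across two 32-bit registers be declared and reassembled in an endian-safe way; a hypothetical compat wrapper as an illustration (example_pread and do_example_pread are made up):

COMPAT_SYSCALL_DEFINE4(example_pread, unsigned int, fd, char __user *, buf,
                       compat_arg_u64_dual(offset))
{
        /* offset_lo/offset_hi are glued back into a single u64 here. */
        return do_example_pread(fd, buf, compat_arg_u64_glue(offset));
}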
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index a3b98c86f077..13f5aa68a455 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -8,12 +8,14 @@
* Optimization for constant divisors on 32-bit machines:
* Copyright (C) 2006-2015 Nicolas Pitre
*
- * The semantics of do_div() are:
+ * The semantics of do_div() is, in C++ notation, observing that the name
+ * is a function-like macro and the n parameter has the semantics of a C++
+ * reference:
*
- * uint32_t do_div(uint64_t *n, uint32_t base)
+ * uint32_t do_div(uint64_t &n, uint32_t base)
* {
- * uint32_t remainder = *n % base;
- * *n = *n / base;
+ * uint32_t remainder = n % base;
+ * n = n / base;
* return remainder;
* }
*
@@ -55,17 +57,11 @@
/*
* If the divisor happens to be constant, we determine the appropriate
* inverse at compile time to turn the division into a few inline
- * multiplications which ought to be much faster. And yet only if compiling
- * with a sufficiently recent gcc version to perform proper 64-bit constant
- * propagation.
+ * multiplications which ought to be much faster.
*
* (It is unfortunate that gcc doesn't perform all this internally.)
*/
-#ifndef __div64_const32_is_OK
-#define __div64_const32_is_OK (__GNUC__ >= 4)
-#endif
-
#define __div64_const32(n, ___b) \
({ \
/* \
@@ -228,8 +224,7 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
is_power_of_2(__base)) { \
__rem = (n) & (__base - 1); \
(n) >>= ilog2(__base); \
- } else if (__div64_const32_is_OK && \
- __builtin_constant_p(__base) && \
+ } else if (__builtin_constant_p(__base) && \
__base != 0) { \
uint32_t __res_lo, __n_lo = (n); \
(n) = __div64_const32(n, __base); \
@@ -239,8 +234,9 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
} else if (likely(((n) >> 32) == 0)) { \
__rem = (uint32_t)(n) % __base; \
(n) = (uint32_t)(n) / __base; \
- } else \
+ } else { \
__rem = __div64_32(&(n), __base); \
+ } \
__rem; \
})
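For reference, the calling convention described by the comment above looks like this in practice; do_div() modifies its 64-bit argument in place and returns the 32-bit remainder (total_bytes is a made-up input):

        u64 bytes = total_bytes;
        u32 rem;

        rem = do_div(bytes, 1000);      /* bytes is now total_bytes / 1000 */
        pr_debug("quotient=%llu remainder=%u\n", bytes, rem);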
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
deleted file mode 100644
index f24b0f9a4f05..000000000000
--- a/include/asm-generic/dma-contiguous.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H
-#define _ASM_GENERIC_DMA_CONTIGUOUS_H
-
-#include <linux/types.h>
-
-static inline void
-dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
-
-#endif
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index 9def22e6e2b3..9d0479f50f97 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -19,12 +19,6 @@ extern void *early_memremap_prot(resource_size_t phys_addr,
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
-/*
- * Weak function called by early_ioremap_reset(). It does nothing, but
- * architectures may provide their own version to do any needed cleanups.
- */
-extern void early_ioremap_shutdown(void);
-
#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
/* Arch-specific initialization */
extern void early_ioremap_init(void);
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 80ca61058dd2..fbca56bd9cbc 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -20,16 +20,16 @@ struct pt_regs;
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
/*
- * Whitelist ganerating macro. Specify functions which can be
+ * Whitelist generating macro. Specify functions which can be
* error-injectable using this macro.
*/
#define ALLOW_ERROR_INJECTION(fname, _etype) \
static struct error_injection_entry __used \
- __attribute__((__section__("_error_injection_whitelist"))) \
+ __section("_error_injection_whitelist") \
_eil_addr_##fname = { \
.addr = (unsigned long)fname, \
.etype = EI_ETYPE_##_etype, \
- };
+ }
void override_function_with_return(struct pt_regs *regs);
#else
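Usage of the whitelist macro is unchanged by the __section() cleanup and the dropped trailing semicolon; a typical, hypothetical example marking a function as error-injectable with an errno-style return (both names are made up):

int example_prepare_io(void *req)
{
        return 0;       /* real code would do work that can fail with an -E... code */
}
ALLOW_ERROR_INJECTION(example_prepare_io, ERRNO);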
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 365345f9a9e3..5e4b1f2369d2 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -2,6 +2,14 @@
#ifndef __ASM_GENERIC_EXPORT_H
#define __ASM_GENERIC_EXPORT_H
+/*
+ * This comment block is used by fixdep. Please do not remove.
+ *
+ * When CONFIG_MODVERSIONS is changed from n to y, all source files having
+ * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a
+ * side effect of the *.o build rule.
+ */
+
#ifndef KSYM_FUNC
#define KSYM_FUNC(x) x
#endif
@@ -12,9 +20,6 @@
#else
#define KSYM_ALIGN 4
#endif
-#ifndef KCRC_ALIGN
-#define KCRC_ALIGN 4
-#endif
.macro __put, val, name
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
@@ -33,7 +38,7 @@
*/
.macro ___EXPORT_SYMBOL name,val,sec
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) && !defined(__DISABLE_EXPORTS)
.section ___ksymtab\sec+\name,"a"
.balign KSYM_ALIGN
__ksymtab_\name:
@@ -43,17 +48,6 @@ __ksymtab_\name:
__kstrtab_\name:
.asciz "\name"
.previous
-#ifdef CONFIG_MODVERSIONS
- .section ___kcrctab\sec+\name,"a"
- .balign KCRC_ALIGN
-#if defined(CONFIG_MODULE_REL_CRCS)
- .long __crc_\name - .
-#else
- .long __crc_\name
-#endif
- .weak __crc_\name
- .previous
-#endif
#endif
.endm
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 02970b11f71f..2a19215baae5 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -6,15 +6,22 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
+#ifndef futex_atomic_cmpxchg_inatomic
#ifndef CONFIG_SMP
/*
* The following implementation is only for uniprocessor machines.
* It relies on preempt_disable() ensuring mutual exclusion.
*
*/
+#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \
+ futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
+#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \
+ futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
+#endif /* CONFIG_SMP */
+#endif
/**
- * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
* argument and comparison of the previous
* futex value with another constant.
*
@@ -28,13 +35,12 @@
* -ENOSYS - Operation not supported
*/
static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
int oldval, ret;
u32 tmp;
preempt_disable();
- pagefault_disable();
ret = -EFAULT;
if (unlikely(get_user(oldval, uaddr) != 0))
@@ -67,7 +73,6 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
ret = -EFAULT;
out_pagefault_enable:
- pagefault_enable();
preempt_enable();
if (ret == 0)
@@ -77,7 +82,7 @@ out_pagefault_enable:
}
/**
- * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the
* uaddr with newval if the current value is
* oldval.
* @uval: pointer to store content of @uaddr
@@ -89,10 +94,9 @@ out_pagefault_enable:
* 0 - On success
* -EFAULT - User access resulted in a page fault
* -EAGAIN - Atomic operation was unable to complete due to contention
- * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
*/
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
u32 val;
@@ -114,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return 0;
}
-#else
-static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
-{
- return -ENOSYS;
-}
-
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_SMP */
#endif
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index e9f20b813a69..f2979e3a96b6 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -26,7 +26,7 @@
*
* The result is undefined if the size is 0.
*/
-static inline __attribute_const__ int get_order(unsigned long size)
+static __always_inline __attribute_const__ int get_order(unsigned long size)
{
if (__builtin_constant_p(size)) {
if (!size)
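The __always_inline change only strengthens constant folding; the result of get_order() itself is unchanged. A few worked values, assuming 4 KiB pages:

/*
 * get_order() returns the smallest order whose allocation covers the request:
 *   get_order(4096)  == 0
 *   get_order(8192)  == 1
 *   get_order(12288) == 2   (rounded up to 4 pages)
 *   get_order(1)     == 0
 */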
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 19eadac415c4..aea9aee1f3e9 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -2,10 +2,8 @@
#ifndef _ASM_GENERIC_GPIO_H
#define _ASM_GENERIC_GPIO_H
-#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/of.h>
#ifdef CONFIG_GPIOLIB
@@ -140,6 +138,8 @@ static inline void gpio_unexport(unsigned gpio)
#else /* !CONFIG_GPIOLIB */
+#include <linux/kernel.h>
+
static inline bool gpio_is_valid(int number)
{
/* only non-negative numbers are valid */
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index d14214dfc10b..7317e8258b48 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -7,9 +7,13 @@
typedef struct {
unsigned int __softirq_pending;
+#ifdef ARCH_WANTS_NMI_IRQSTAT
+ unsigned int __nmi_count;
+#endif
} ____cacheline_aligned irq_cpustat_t;
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
+
#include <linux/irq.h>
#ifndef ack_bad_irq
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 822f433ac95c..a57d667addd2 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -2,6 +2,9 @@
#ifndef _ASM_GENERIC_HUGETLB_H
#define _ASM_GENERIC_HUGETLB_H
+#include <linux/swap.h>
+#include <linux/swapops.h>
+
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
return mk_pte(page, pgprot);
@@ -32,6 +35,21 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
+static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
+{
+ return pte_mkuffd_wp(pte);
+}
+
+static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
+{
+ return pte_clear_uffd_wp(pte);
+}
+
+static inline int huge_pte_uffd_wp(pte_t pte)
+{
+ return pte_uffd_wp(pte);
+}
+
#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
@@ -66,10 +84,10 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- ptep_clear_flush(vma, addr, ptep);
+ return ptep_clear_flush(vma, addr, ptep);
}
#endif
@@ -80,6 +98,12 @@ static inline int huge_pte_none(pte_t pte)
}
#endif
+/* Please refer to comments above pte_none_mostly() for the usage */
+static inline int huge_pte_none_mostly(pte_t pte)
+{
+ return huge_pte_none(pte) || is_pte_marker(pte);
+}
+
#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
@@ -122,7 +146,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#ifndef __HAVE_ARCH_HUGE_PTEP_GET
static inline pte_t huge_ptep_get(pte_t *ptep)
{
- return *ptep;
+ return ptep_get(ptep);
}
#endif
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
new file mode 100644
index 000000000000..b17c6eeb9afa
--- /dev/null
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -0,0 +1,793 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file contains definitions from Hyper-V Hypervisor Top-Level Functional
+ * Specification (TLFS):
+ * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
+ */
+
+#ifndef _ASM_GENERIC_HYPERV_TLFS_H
+#define _ASM_GENERIC_HYPERV_TLFS_H
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/time64.h>
+
+/*
+ * While not explicitly listed in the TLFS, Hyper-V always runs with a page size
+ * of 4096. These definitions are used when communicating with Hyper-V using
+ * guest physical pages and guest physical page addresses, since the guest page
+ * size may not be 4096 on all architectures.
+ */
+#define HV_HYP_PAGE_SHIFT 12
+#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT)
+#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1))
+
+/*
+ * Hyper-V provides two categories of flags relevant to guest VMs. The
+ * "Features" category indicates specific functionality that is available
+ * to guests on this particular instance of Hyper-V. The "Features"
+ * are presented in four groups, each of which is 32 bits. The group A
+ * and B definitions are common across architectures and are listed here.
+ * However, not all flags are relevant on all architectures.
+ *
+ * Groups C and D vary across architectures and are listed in the
+ * architecture specific portion of hyperv-tlfs.h. Some of these flags exist
+ * on multiple architectures, but the bit positions are different so they
+ * cannot appear in the generic portion of hyperv-tlfs.h.
+ *
+ * The "Enlightenments" category provides recommendations on whether to use
+ * specific enlightenments that are available. The Enlightenments are a single
+ * group of 32 bits, but they vary across architectures and are listed in
+ * the architecture specific portion of hyperv-tlfs.h.
+ */
+
+/*
+ * Group A Features.
+ */
+
+/* VP Runtime register available */
+#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0)
+/* Partition Reference Counter available */
+#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
+/* Basic SynIC register available */
+#define HV_MSR_SYNIC_AVAILABLE BIT(2)
+/* Synthetic Timer registers available */
+#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
+/* Virtual APIC assist and VP assist page registers available */
+#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4)
+/* Hypercall and Guest OS ID registers available */
+#define HV_MSR_HYPERCALL_AVAILABLE BIT(5)
+/* Access virtual processor index register available */
+#define HV_MSR_VP_INDEX_AVAILABLE BIT(6)
+/* Virtual system reset register available */
+#define HV_MSR_RESET_AVAILABLE BIT(7)
+/* Access statistics page registers available */
+#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8)
+/* Partition reference TSC register is available */
+#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
+/* Partition Guest IDLE register is available */
+#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10)
+/* Partition local APIC and TSC frequency registers available */
+#define HV_ACCESS_FREQUENCY_MSRS BIT(11)
+/* AccessReenlightenmentControls privilege */
+#define HV_ACCESS_REENLIGHTENMENT BIT(13)
+/* AccessTscInvariantControls privilege */
+#define HV_ACCESS_TSC_INVARIANT BIT(15)
+
+/*
+ * Group B features.
+ */
+#define HV_CREATE_PARTITIONS BIT(0)
+#define HV_ACCESS_PARTITION_ID BIT(1)
+#define HV_ACCESS_MEMORY_POOL BIT(2)
+#define HV_ADJUST_MESSAGE_BUFFERS BIT(3)
+#define HV_POST_MESSAGES BIT(4)
+#define HV_SIGNAL_EVENTS BIT(5)
+#define HV_CREATE_PORT BIT(6)
+#define HV_CONNECT_PORT BIT(7)
+#define HV_ACCESS_STATS BIT(8)
+#define HV_DEBUGGING BIT(11)
+#define HV_CPU_MANAGEMENT BIT(12)
+#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
+#define HV_ISOLATION BIT(22)
+
+/*
+ * TSC page layout.
+ */
+struct ms_hyperv_tsc_page {
+ volatile u32 tsc_sequence;
+ u32 reserved1;
+ volatile u64 tsc_scale;
+ volatile s64 tsc_offset;
+} __packed;
+
+union hv_reference_tsc_msr {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 reserved:11;
+ u64 pfn:52;
+ } __packed;
+};
+
+/*
+ * The guest OS needs to register the guest ID with the hypervisor.
+ * The guest ID is a 64 bit entity and the structure of this ID is
+ * specified in the Hyper-V specification:
+ *
+ * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
+ *
+ * While the current guideline does not specify how Linux guest ID(s)
+ * need to be generated, our plan is to publish the guidelines for
+ * Linux and other guest operating systems that currently are hosted
+ * on Hyper-V. The implementation here conforms to these as-yet
+ * unpublished guidelines.
+ *
+ *
+ * Bit(s)
+ * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
+ * 62:56 - OS Type; Linux is 0x100
+ * 55:48 - Distro specific identification
+ * 47:16 - Linux kernel version number
+ * 15:0 - Distro specific identification
+ *
+ *
+ */
+
+#define HV_LINUX_VENDOR_ID 0x8100
+
+/*
+ * Crash notification flags.
+ */
+#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
+#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
+
+/* Declare the various hypercall operations. */
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_SEND_IPI 0x000b
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
+#define HVCALL_SEND_IPI_EX 0x0015
+#define HVCALL_GET_PARTITION_ID 0x0046
+#define HVCALL_DEPOSIT_MEMORY 0x0048
+#define HVCALL_CREATE_VP 0x004e
+#define HVCALL_GET_VP_REGISTERS 0x0050
+#define HVCALL_SET_VP_REGISTERS 0x0051
+#define HVCALL_POST_MESSAGE 0x005c
+#define HVCALL_SIGNAL_EVENT 0x005d
+#define HVCALL_POST_DEBUG_DATA 0x0069
+#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
+#define HVCALL_RESET_DEBUG_SESSION 0x006b
+#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
+#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
+#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
+#define HVCALL_RETARGET_INTERRUPT 0x007e
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
+#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
+
+/* Extended hypercalls */
+#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
+#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003
+
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+
+/* Extended capability bits */
+#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
+
+enum HV_GENERIC_SET_FORMAT {
+ HV_GENERIC_SET_SPARSE_4K,
+ HV_GENERIC_SET_ALL,
+};
+
+#define HV_PARTITION_ID_SELF ((u64)-1)
+#define HV_VP_INDEX_SELF ((u32)-2)
+
+#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
+#define HV_HYPERCALL_FAST_BIT BIT(16)
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17)
+#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27)
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32)
+#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
+#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44)
+#define HV_HYPERCALL_REP_START_OFFSET 48
+#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
+#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60)
+#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \
+ HV_HYPERCALL_RSVD1_MASK | \
+ HV_HYPERCALL_RSVD2_MASK)
+
+/* hypercall status code */
+#define HV_STATUS_SUCCESS 0
+#define HV_STATUS_INVALID_HYPERCALL_CODE 2
+#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
+#define HV_STATUS_INVALID_ALIGNMENT 4
+#define HV_STATUS_INVALID_PARAMETER 5
+#define HV_STATUS_ACCESS_DENIED 6
+#define HV_STATUS_OPERATION_DENIED 8
+#define HV_STATUS_INSUFFICIENT_MEMORY 11
+#define HV_STATUS_INVALID_PORT_ID 17
+#define HV_STATUS_INVALID_CONNECTION_ID 18
+#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+
+/*
+ * The Hyper-V TimeRefCount register and the TSC
+ * page provide a guest VM clock with 100ns tick rate
+ */
+#define HV_CLOCK_HZ (NSEC_PER_SEC/100)
+
+/* Define the number of synthetic interrupt sources. */
+#define HV_SYNIC_SINT_COUNT (16)
+/* Define the expected SynIC version. */
+#define HV_SYNIC_VERSION_1 (0x1)
+/* Valid SynIC vectors are 16-255. */
+#define HV_SYNIC_FIRST_VALID_VECTOR (16)
+
+#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SINT_MASKED (1ULL << 16)
+#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
+#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
+
+#define HV_SYNIC_STIMER_COUNT (4)
+
+/* Define synthetic interrupt controller message constants. */
+#define HV_MESSAGE_SIZE (256)
+#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
+#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
+
+/*
+ * Define hypervisor message types. Some of the message types
+ * are x86/x64 specific, but there's no good way to separate
+ * them out into the arch-specific version of hyperv-tlfs.h
+ * because C doesn't provide a way to extend enum types.
+ * Keeping them all in the arch neutral hyperv-tlfs.h seems
+ * the least messy compromise.
+ */
+enum hv_message_type {
+ HVMSG_NONE = 0x00000000,
+
+ /* Memory access messages. */
+ HVMSG_UNMAPPED_GPA = 0x80000000,
+ HVMSG_GPA_INTERCEPT = 0x80000001,
+
+ /* Timer notification messages. */
+ HVMSG_TIMER_EXPIRED = 0x80000010,
+
+ /* Error messages. */
+ HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+ HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
+ HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
+
+ /* Trace buffer complete messages. */
+ HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
+
+ /* Platform-specific processor intercept messages. */
+ HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
+ HVMSG_X64_MSR_INTERCEPT = 0x80010001,
+ HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
+ HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
+ HVMSG_X64_APIC_EOI = 0x80010004,
+ HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
+};
+
+/* Define synthetic interrupt controller message flags. */
+union hv_message_flags {
+ __u8 asu8;
+ struct {
+ __u8 msg_pending:1;
+ __u8 reserved:7;
+ } __packed;
+};
+
+/* Define port identifier type. */
+union hv_port_id {
+ __u32 asu32;
+ struct {
+ __u32 id:24;
+ __u32 reserved:8;
+ } __packed u;
+};
+
+/* Define synthetic interrupt controller message header. */
+struct hv_message_header {
+ __u32 message_type;
+ __u8 payload_size;
+ union hv_message_flags message_flags;
+ __u8 reserved[2];
+ union {
+ __u64 sender;
+ union hv_port_id port;
+ };
+} __packed;
+
+/* Define synthetic interrupt controller message format. */
+struct hv_message {
+ struct hv_message_header header;
+ union {
+ __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+ } u;
+} __packed;
+
+/* Define the synthetic interrupt message page layout. */
+struct hv_message_page {
+ struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
+} __packed;
+
+/* Define timer message payload structure. */
+struct hv_timer_message_payload {
+ __u32 timer_index;
+ __u32 reserved;
+ __u64 expiration_time; /* When the timer expired */
+ __u64 delivery_time; /* When the message was delivered */
+} __packed;
+
+
+/* Define synthetic interrupt controller flag constants. */
+#define HV_EVENT_FLAGS_COUNT (256 * 8)
+#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
+
+/*
+ * Synthetic timer configuration.
+ */
+union hv_stimer_config {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 periodic:1;
+ u64 lazy:1;
+ u64 auto_enable:1;
+ u64 apic_vector:8;
+ u64 direct_mode:1;
+ u64 reserved_z0:3;
+ u64 sintx:4;
+ u64 reserved_z1:44;
+ } __packed;
+};
+
+
+/* Define the synthetic interrupt controller event flags format. */
+union hv_synic_event_flags {
+ unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
+};
+
+/* Define SynIC control register. */
+union hv_synic_scontrol {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 reserved:63;
+ } __packed;
+};
+
+/* Define synthetic interrupt source. */
+union hv_synic_sint {
+ u64 as_uint64;
+ struct {
+ u64 vector:8;
+ u64 reserved1:8;
+ u64 masked:1;
+ u64 auto_eoi:1;
+ u64 polling:1;
+ u64 reserved2:45;
+ } __packed;
+};
+
+/* Define the format of the SIMP register */
+union hv_synic_simp {
+ u64 as_uint64;
+ struct {
+ u64 simp_enabled:1;
+ u64 preserved:11;
+ u64 base_simp_gpa:52;
+ } __packed;
+};
+
+/* Define the format of the SIEFP register */
+union hv_synic_siefp {
+ u64 as_uint64;
+ struct {
+ u64 siefp_enabled:1;
+ u64 preserved:11;
+ u64 base_siefp_gpa:52;
+ } __packed;
+};
+
+struct hv_vpset {
+ u64 format;
+ u64 valid_bank_mask;
+ u64 bank_contents[];
+} __packed;
+
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi {
+ u32 vector;
+ u32 reserved;
+ u64 cpu_mask;
+} __packed;
+
+/* HvCallSendSyntheticClusterIpiEx hypercall */
+struct hv_send_ipi_ex {
+ u32 vector;
+ u32 reserved;
+ struct hv_vpset vp_set;
+} __packed;
+
+/* HvFlushGuestPhysicalAddressSpace hypercalls */
+struct hv_guest_mapping_flush {
+ u64 address_space;
+ u64 flags;
+} __packed;
+
+/*
+ * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
+ * by the bitwidth of "additional_pages" in union hv_gpa_page_range.
+ */
+#define HV_MAX_FLUSH_PAGES (2048)
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1
+
+/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
+union hv_gpa_page_range {
+ u64 address_space;
+ struct {
+ u64 additional_pages:11;
+ u64 largepage:1;
+ u64 basepfn:52;
+ } page;
+ struct {
+ u64 reserved:12;
+ u64 page_size:1;
+ u64 reserved1:8;
+ u64 base_large_pfn:43;
+ };
+};
+
+/*
+ * All input flush parameters should fit in a single page. The max flush
+ * count equals the number of union hv_gpa_page_range entries that can
+ * be populated into the input parameter page.
+ */
+#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \
+ sizeof(union hv_gpa_page_range))
+
+struct hv_guest_mapping_flush_list {
+ u64 address_space;
+ u64 flags;
+ union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
+};
+
+/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
+struct hv_tlb_flush {
+ u64 address_space;
+ u64 flags;
+ u64 processor_mask;
+ u64 gva_list[];
+} __packed;
+
+/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
+struct hv_tlb_flush_ex {
+ u64 address_space;
+ u64 flags;
+ struct hv_vpset hv_vp_set;
+ u64 gva_list[];
+} __packed;
+
+/* HvGetPartitionId hypercall (output only) */
+struct hv_get_partition_id {
+ u64 partition_id;
+} __packed;
+
+/* HvDepositMemory hypercall */
+struct hv_deposit_memory {
+ u64 partition_id;
+ u64 gpa_page_list[];
+} __packed;
+
+struct hv_proximity_domain_flags {
+ u32 proximity_preferred : 1;
+ u32 reserved : 30;
+ u32 proximity_info_valid : 1;
+} __packed;
+
+/* Not a union in Windows but useful for zeroing */
+union hv_proximity_domain_info {
+ struct {
+ u32 domain_id;
+ struct hv_proximity_domain_flags flags;
+ };
+ u64 as_uint64;
+} __packed;
+
+struct hv_lp_startup_status {
+ u64 hv_status;
+ u64 substatus1;
+ u64 substatus2;
+ u64 substatus3;
+ u64 substatus4;
+ u64 substatus5;
+ u64 substatus6;
+} __packed;
+
+/* HvAddLogicalProcessor hypercall */
+struct hv_add_logical_processor_in {
+ u32 lp_index;
+ u32 apic_id;
+ union hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+struct hv_add_logical_processor_out {
+ struct hv_lp_startup_status startup_status;
+} __packed;
+
+enum HV_SUBNODE_TYPE
+{
+ HvSubnodeAny = 0,
+ HvSubnodeSocket = 1,
+ HvSubnodeAmdNode = 2,
+ HvSubnodeL3 = 3,
+ HvSubnodeCount = 4,
+ HvSubnodeInvalid = -1
+};
+
+/* HvCreateVp hypercall */
+struct hv_create_vp {
+ u64 partition_id;
+ u32 vp_index;
+ u8 padding[3];
+ u8 subnode_type;
+ u64 subnode_id;
+ union hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+enum hv_interrupt_source {
+ HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */
+ HV_INTERRUPT_SOURCE_IOAPIC,
+};
+
+union hv_ioapic_rte {
+ u64 as_uint64;
+
+ struct {
+ u32 vector:8;
+ u32 delivery_mode:3;
+ u32 destination_mode:1;
+ u32 delivery_status:1;
+ u32 interrupt_polarity:1;
+ u32 remote_irr:1;
+ u32 trigger_mode:1;
+ u32 interrupt_mask:1;
+ u32 reserved1:15;
+
+ u32 reserved2:24;
+ u32 destination_id:8;
+ };
+
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ };
+} __packed;
+
+struct hv_interrupt_entry {
+ u32 source;
+ u32 reserved1;
+ union {
+ union hv_msi_entry msi_entry;
+ union hv_ioapic_rte ioapic_rte;
+ };
+} __packed;
+
+/*
+ * flags for hv_device_interrupt_target.flags
+ */
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
+
+struct hv_device_interrupt_target {
+ u32 vector;
+ u32 flags;
+ union {
+ u64 vp_mask;
+ struct hv_vpset vp_set;
+ };
+} __packed;
+
+struct hv_retarget_device_interrupt {
+ u64 partition_id; /* use "self" */
+ u64 device_id;
+ struct hv_interrupt_entry int_entry;
+ u64 reserved2;
+ struct hv_device_interrupt_target int_target;
+} __packed __aligned(8);
+
+
+/* HvGetVpRegisters hypercall input with variable size reg name list */
+struct hv_get_vp_registers_input {
+ struct {
+ u64 partitionid;
+ u32 vpindex;
+ u8 inputvtl;
+ u8 padding[3];
+ } header;
+ struct input {
+ u32 name0;
+ u32 name1;
+ } element[];
+} __packed;
+
+
+/* HvGetVpRegisters returns an array of these output elements */
+struct hv_get_vp_registers_output {
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ u32 c;
+ u32 d;
+ } as32 __packed;
+ struct {
+ u64 low;
+ u64 high;
+ } as64 __packed;
+ };
+};
+
+/* HvSetVpRegisters hypercall with variable size reg name/value list */
+struct hv_set_vp_registers_input {
+ struct {
+ u64 partitionid;
+ u32 vpindex;
+ u8 inputvtl;
+ u8 padding[3];
+ } header;
+ struct {
+ u32 name;
+ u32 padding1;
+ u64 padding2;
+ u64 valuelow;
+ u64 valuehigh;
+ } element[];
+} __packed;
+
+enum hv_device_type {
+ HV_DEVICE_TYPE_LOGICAL = 0,
+ HV_DEVICE_TYPE_PCI = 1,
+ HV_DEVICE_TYPE_IOAPIC = 2,
+ HV_DEVICE_TYPE_ACPI = 3,
+};
+
+typedef u16 hv_pci_rid;
+typedef u16 hv_pci_segment;
+typedef u64 hv_logical_device_id;
+union hv_pci_bdf {
+ u16 as_uint16;
+
+ struct {
+ u8 function:3;
+ u8 device:5;
+ u8 bus;
+ };
+} __packed;
+
+union hv_pci_bus_range {
+ u16 as_uint16;
+
+ struct {
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ };
+} __packed;
+
+union hv_device_id {
+ u64 as_uint64;
+
+ struct {
+ u64 reserved0:62;
+ u64 device_type:2;
+ };
+
+ /* HV_DEVICE_TYPE_LOGICAL */
+ struct {
+ u64 id:62;
+ u64 device_type:2;
+ } logical;
+
+ /* HV_DEVICE_TYPE_PCI */
+ struct {
+ union {
+ hv_pci_rid rid;
+ union hv_pci_bdf bdf;
+ };
+
+ hv_pci_segment segment;
+ union hv_pci_bus_range shadow_bus_range;
+
+ u16 phantom_function_bits:2;
+ u16 source_shadow:1;
+
+ u16 rsvdz0:11;
+ u16 device_type:2;
+ } pci;
+
+ /* HV_DEVICE_TYPE_IOAPIC */
+ struct {
+ u8 ioapic_id;
+ u8 rsvdz0;
+ u16 rsvdz1;
+ u16 rsvdz2;
+
+ u16 rsvdz3:14;
+ u16 device_type:2;
+ } ioapic;
+
+ /* HV_DEVICE_TYPE_ACPI */
+ struct {
+ u32 input_mapping_base;
+ u32 input_mapping_count:30;
+ u32 device_type:2;
+ } acpi;
+} __packed;
+
+enum hv_interrupt_trigger_mode {
+ HV_INTERRUPT_TRIGGER_MODE_EDGE = 0,
+ HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1,
+};
+
+struct hv_device_interrupt_descriptor {
+ u32 interrupt_type;
+ u32 trigger_mode;
+ u32 vector_count;
+ u32 reserved;
+ struct hv_device_interrupt_target target;
+} __packed;
+
+struct hv_input_map_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ u64 flags;
+ struct hv_interrupt_entry logical_interrupt_entry;
+ struct hv_device_interrupt_descriptor interrupt_descriptor;
+} __packed;
+
+struct hv_output_map_device_interrupt {
+ struct hv_interrupt_entry interrupt_entry;
+} __packed;
+
+struct hv_input_unmap_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ struct hv_interrupt_entry interrupt_entry;
+} __packed;
+
+#define HV_SOURCE_SHADOW_NONE 0x0
+#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
+
+/*
+ * The whole argument should fit in a page to be able to pass to the hypervisor
+ * in one hypercall.
+ */
+#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
+ sizeof(union hv_gpa_page_range))
+
+/* HvExtCallMemoryHeatHint hypercall */
+#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2
+struct hv_memory_hint {
+ u64 type:2;
+ u64 reserved:62;
+ union hv_gpa_page_range ranges[];
+} __packed;
+
+#endif
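Illustrative sketch (not part of the patch): HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES above caps how many ranges fit in a single-page hypercall argument. Assuming the HV_EXT_CALL_MEMORY_HEAT_HINT call code and HV_STATUS_INVALID_PARAMETER status defined elsewhere in hyperv-tlfs.h, and the hv_do_rep_hypercall() helper added later in this series, a hypothetical caller might look like:

	/* hint is assumed to point at one zeroed HV_HYP_PAGE_SIZE buffer */
	static u64 foo_cold_discard_hint(struct hv_memory_hint *hint, int nr_ranges)
	{
		if (nr_ranges > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES)
			return HV_STATUS_INVALID_PARAMETER;	/* would not fit in one page */

		hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
		/* hint->ranges[0..nr_ranges-1] are filled in by the caller */

		return hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT,
					   nr_ranges, 0, hint, NULL);
	}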
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d39ac997dda8..a68f8fbf423b 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -10,6 +10,7 @@
#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>
+#include <linux/instruction_pointer.h>
#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
@@ -61,6 +62,44 @@
#define __io_par(v) __io_ar(v)
#endif
+/*
+ * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
+ * specific kernel drivers in case of excessive/unwanted logging.
+ *
+ * Usage: Add a #define flag at the beginning of the driver file.
+ * Ex: #define __DISABLE_TRACE_MMIO__
+ * #include <...>
+ * ...
+ */
+#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
+#include <linux/tracepoint-defs.h>
+
+DECLARE_TRACEPOINT(rwmmio_write);
+DECLARE_TRACEPOINT(rwmmio_post_write);
+DECLARE_TRACEPOINT(rwmmio_read);
+DECLARE_TRACEPOINT(rwmmio_post_read);
+
+void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr);
+
+#else
+
+static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+
+#endif /* CONFIG_TRACE_MMIO_ACCESS */
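Illustrative sketch (not part of the patch) of the opt-out described in the comment above: a driver that produces excessive MMIO logging defines the flag before any include, and its readl()/writel() calls in that file then compile against the empty log_*_mmio() stubs. The driver name is made up.

	#define __DISABLE_TRACE_MMIO__

	#include <linux/io.h>

	static void noisy_uart_poke(void __iomem *base)
	{
		writel(0x1, base);	/* not traced in this file */
		(void)readl(base);
	}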
/*
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
@@ -149,9 +188,11 @@ static inline u8 readb(const volatile void __iomem *addr)
{
u8 val;
+ log_read_mmio(8, addr, _THIS_IP_);
__io_br();
val = __raw_readb(addr);
__io_ar(val);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_);
return val;
}
#endif
@@ -162,9 +203,11 @@ static inline u16 readw(const volatile void __iomem *addr)
{
u16 val;
+ log_read_mmio(16, addr, _THIS_IP_);
__io_br();
- val = __le16_to_cpu(__raw_readw(addr));
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
__io_ar(val);
+ log_post_read_mmio(val, 16, addr, _THIS_IP_);
return val;
}
#endif
@@ -175,9 +218,11 @@ static inline u32 readl(const volatile void __iomem *addr)
{
u32 val;
+ log_read_mmio(32, addr, _THIS_IP_);
__io_br();
- val = __le32_to_cpu(__raw_readl(addr));
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
__io_ar(val);
+ log_post_read_mmio(val, 32, addr, _THIS_IP_);
return val;
}
#endif
@@ -189,9 +234,11 @@ static inline u64 readq(const volatile void __iomem *addr)
{
u64 val;
+ log_read_mmio(64, addr, _THIS_IP_);
__io_br();
val = __le64_to_cpu(__raw_readq(addr));
__io_ar(val);
+ log_post_read_mmio(val, 64, addr, _THIS_IP_);
return val;
}
#endif
@@ -201,9 +248,11 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 8, addr, _THIS_IP_);
__io_bw();
__raw_writeb(value, addr);
__io_aw();
+ log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif
@@ -211,9 +260,11 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 16, addr, _THIS_IP_);
__io_bw();
- __raw_writew(cpu_to_le16(value), addr);
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
__io_aw();
+ log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif
@@ -221,9 +272,11 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 32, addr, _THIS_IP_);
__io_bw();
- __raw_writel(__cpu_to_le32(value), addr);
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
__io_aw();
+ log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif
@@ -232,9 +285,11 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 64, addr, _THIS_IP_);
__io_bw();
__raw_writeq(__cpu_to_le64(value), addr);
__io_aw();
+ log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif
#endif /* CONFIG_64BIT */
@@ -248,7 +303,12 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
- return __raw_readb(addr);
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_);
+ val = __raw_readb(addr);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -256,7 +316,12 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
- return __le16_to_cpu(__raw_readw(addr));
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_);
+ val = __le16_to_cpu(__raw_readw(addr));
+ log_post_read_mmio(val, 16, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -264,7 +329,12 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
- return __le32_to_cpu(__raw_readl(addr));
+ u32 val;
+
+ log_read_mmio(32, addr, _THIS_IP_);
+ val = __le32_to_cpu(__raw_readl(addr));
+ log_post_read_mmio(val, 32, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -272,7 +342,12 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
- return __le64_to_cpu(__raw_readq(addr));
+ u64 val;
+
+ log_read_mmio(64, addr, _THIS_IP_);
+ val = __le64_to_cpu(__raw_readq(addr));
+ log_post_read_mmio(val, 64, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -280,7 +355,9 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 8, addr, _THIS_IP_);
__raw_writeb(value, addr);
+ log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif
@@ -288,7 +365,9 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 16, addr, _THIS_IP_);
__raw_writew(cpu_to_le16(value), addr);
+ log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif
@@ -296,7 +375,9 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 32, addr, _THIS_IP_);
__raw_writel(__cpu_to_le32(value), addr);
+ log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif
@@ -304,7 +385,9 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 64, addr, _THIS_IP_);
__raw_writeq(__cpu_to_le64(value), addr);
+ log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif
@@ -448,17 +531,15 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define IO_SPACE_LIMIT 0xffff
#endif
-#include <linux/logic_pio.h>
-
/*
* {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
* implemented on hardware that needs an additional delay for I/O accesses to
* take effect.
*/
-#ifndef inb
-#define inb inb
-static inline u8 inb(unsigned long addr)
+#if !defined(inb) && !defined(_inb)
+#define _inb _inb
+static inline u8 _inb(unsigned long addr)
{
u8 val;
@@ -469,35 +550,35 @@ static inline u8 inb(unsigned long addr)
}
#endif
-#ifndef inw
-#define inw inw
-static inline u16 inw(unsigned long addr)
+#if !defined(inw) && !defined(_inw)
+#define _inw _inw
+static inline u16 _inw(unsigned long addr)
{
u16 val;
__io_pbr();
- val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
+ val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
__io_par(val);
return val;
}
#endif
-#ifndef inl
-#define inl inl
-static inline u32 inl(unsigned long addr)
+#if !defined(inl) && !defined(_inl)
+#define _inl _inl
+static inline u32 _inl(unsigned long addr)
{
u32 val;
__io_pbr();
- val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
+ val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
__io_par(val);
return val;
}
#endif
-#ifndef outb
-#define outb outb
-static inline void outb(u8 value, unsigned long addr)
+#if !defined(outb) && !defined(_outb)
+#define _outb _outb
+static inline void _outb(u8 value, unsigned long addr)
{
__io_pbw();
__raw_writeb(value, PCI_IOBASE + addr);
@@ -505,26 +586,52 @@ static inline void outb(u8 value, unsigned long addr)
}
#endif
-#ifndef outw
-#define outw outw
-static inline void outw(u16 value, unsigned long addr)
+#if !defined(outw) && !defined(_outw)
+#define _outw _outw
+static inline void _outw(u16 value, unsigned long addr)
{
__io_pbw();
- __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
+ __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
__io_paw();
}
#endif
-#ifndef outl
-#define outl outl
-static inline void outl(u32 value, unsigned long addr)
+#if !defined(outl) && !defined(_outl)
+#define _outl _outl
+static inline void _outl(u32 value, unsigned long addr)
{
__io_pbw();
- __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
+ __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
__io_paw();
}
#endif
+#include <linux/logic_pio.h>
+
+#ifndef inb
+#define inb _inb
+#endif
+
+#ifndef inw
+#define inw _inw
+#endif
+
+#ifndef inl
+#define inl _inl
+#endif
+
+#ifndef outb
+#define outb _outb
+#endif
+
+#ifndef outw
+#define outw _outw
+#endif
+
+#ifndef outl
+#define outl _outl
+#endif
+
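Illustrative sketch (not part of the patch): because the generic port accessors are now reachable as _inb()/_outb() and friends, an architecture's asm/io.h can override a single accessor and leave the rest to the defaults above. The "foo" names are hypothetical.

	/* in a hypothetical arch/foo/include/asm/io.h */
	#define inb foo_inb
	u8 foo_inb(unsigned long addr);		/* implemented in arch code */

	#include <asm-generic/io.h>	/* inw/inl/outb/... fall back to _inw/_inl/_outb/... */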
#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
@@ -887,18 +994,6 @@ static inline void iowrite64_rep(volatile void __iomem *addr,
#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))
-#ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-}
-#endif
-#endif /* CONFIG_GENERIC_IOMAP */
-
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
@@ -930,7 +1025,9 @@ static inline void *phys_to_virt(unsigned long address)
*
* ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
 * for specific drivers if the architecture chooses to implement them. If they
- * are not implemented we fall back to plain ioremap.
+ * are not implemented we fall back to plain ioremap. Conversely, ioremap_np()
+ * can provide stricter non-posted write semantics if the architecture
+ * implements them.
*/
#ifndef CONFIG_MMU
#ifndef ioremap
@@ -943,14 +1040,41 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
#ifndef iounmap
#define iounmap iounmap
-static inline void iounmap(void __iomem *addr)
+static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
+
+/*
+ * Arch code can implement the following two hooks when using GENERIC_IOREMAP:
+ * ioremap_allowed() returns a bool,
+ * - true means continue to remap
+ * - false means skip remap and return directly
+ * iounmap_allowed() returns a bool,
+ * - true means continue to vunmap
+ * - false means skip vunmap and return directly
+ */
+#ifndef ioremap_allowed
+#define ioremap_allowed ioremap_allowed
+static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
+{
+ return true;
+}
+#endif
+
+#ifndef iounmap_allowed
+#define iounmap_allowed iounmap_allowed
+static inline bool iounmap_allowed(void *addr)
+{
+ return true;
+}
+#endif
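Illustrative sketch (not part of the patch): an architecture selecting GENERIC_IOREMAP could use the hook to refuse remapping of ordinary RAM; the page_is_ram() policy below is only an example, not something the interface requires.

	/* in a hypothetical asm/io.h, before <asm-generic/io.h> is included */
	#define ioremap_allowed ioremap_allowed
	static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
					   unsigned long prot)
	{
		/* false makes the generic ioremap_prot() skip the remap */
		return !page_is_ram(PHYS_PFN(phys_addr));
	}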
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot);
void iounmap(volatile void __iomem *addr);
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
@@ -983,6 +1107,23 @@ static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
}
#endif
+/*
+ * ioremap_np needs an explicit architecture implementation, as it
+ * requests stronger semantics than regular ioremap(). Portable drivers
+ * should instead use one of the higher-level abstractions, like
+ * devm_ioremap_resource(), to choose the correct variant for any given
+ * device and bus. Portable drivers with a good reason to want non-posted
+ * write semantics should always provide an ioremap() fallback in case
+ * ioremap_np() is not available.
+ */
+#ifndef ioremap_np
+#define ioremap_np ioremap_np
+static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
+#endif
+
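Illustrative sketch (not part of the patch) of the fallback pattern the comment above asks portable drivers to follow; the wrapper name is made up.

	static void __iomem *foo_map_regs(phys_addr_t start, size_t size)
	{
		void __iomem *base;

		base = ioremap_np(start, size);	/* NULL where not implemented */
		if (!base)
			base = ioremap(start, size);
		return base;
	}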
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
@@ -992,6 +1133,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
port &= IO_SPACE_LIMIT;
return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif
#ifndef ioport_unmap
@@ -1006,15 +1148,10 @@ extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#ifndef xlate_dev_kmem_ptr
-#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
-static inline void *xlate_dev_kmem_ptr(void *addr)
-{
- return addr;
-}
+#ifndef CONFIG_GENERIC_IOMAP
+#ifndef pci_iounmap
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
#endif
#ifndef xlate_dev_mem_ptr
@@ -1032,20 +1169,6 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
}
#endif
-#ifdef CONFIG_VIRT_TO_BUS
-#ifndef virt_to_bus
-static inline unsigned long virt_to_bus(void *address)
-{
- return (unsigned long)address;
-}
-
-static inline void *bus_to_virt(unsigned long address)
-{
- return (void *)address;
-}
-#endif
-#endif
-
#ifndef memset_io
#define memset_io memset_io
/**
@@ -1098,6 +1221,8 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
}
#endif
+extern int devmem_is_allowed(unsigned long pfn);
+
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_IO_H */
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 9d28a5e82f73..08237ae8b840 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -26,14 +26,14 @@
* in the low address range. Architectures for which this is not
* true can't use this generic implementation.
*/
-extern unsigned int ioread8(void __iomem *);
-extern unsigned int ioread16(void __iomem *);
-extern unsigned int ioread16be(void __iomem *);
-extern unsigned int ioread32(void __iomem *);
-extern unsigned int ioread32be(void __iomem *);
+extern unsigned int ioread8(const void __iomem *);
+extern unsigned int ioread16(const void __iomem *);
+extern unsigned int ioread16be(const void __iomem *);
+extern unsigned int ioread32(const void __iomem *);
+extern unsigned int ioread32be(const void __iomem *);
#ifdef CONFIG_64BIT
-extern u64 ioread64(void __iomem *);
-extern u64 ioread64be(void __iomem *);
+extern u64 ioread64(const void __iomem *);
+extern u64 ioread64be(const void __iomem *);
#endif
#ifdef readq
@@ -41,10 +41,10 @@ extern u64 ioread64be(void __iomem *);
#define ioread64_hi_lo ioread64_hi_lo
#define ioread64be_lo_hi ioread64be_lo_hi
#define ioread64be_hi_lo ioread64be_hi_lo
-extern u64 ioread64_lo_hi(void __iomem *addr);
-extern u64 ioread64_hi_lo(void __iomem *addr);
-extern u64 ioread64be_lo_hi(void __iomem *addr);
-extern u64 ioread64be_hi_lo(void __iomem *addr);
+extern u64 ioread64_lo_hi(const void __iomem *addr);
+extern u64 ioread64_hi_lo(const void __iomem *addr);
+extern u64 ioread64be_lo_hi(const void __iomem *addr);
+extern u64 ioread64be_hi_lo(const void __iomem *addr);
#endif
extern void iowrite8(u8, void __iomem *);
@@ -79,9 +79,9 @@ extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
* memory across multiple ports, use "memcpy_toio()"
* and friends.
*/
-extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
-extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
-extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
@@ -101,14 +101,13 @@ extern void ioport_unmap(void __iomem *);
#define ioremap_wt ioremap
#endif
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
+#ifndef ARCH_HAS_IOREMAP_NP
+/* See the comment in asm-generic/io.h about ioremap_np(). */
+#define ioremap_np ioremap_np
+static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
#endif
#include <asm-generic/pci_iomap.h>
diff --git a/include/asm-generic/kmap_size.h b/include/asm-generic/kmap_size.h
new file mode 100644
index 000000000000..6e36b2443ece
--- /dev/null
+++ b/include/asm-generic/kmap_size.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KMAP_SIZE_H
+#define _ASM_GENERIC_KMAP_SIZE_H
+
+/* For debug this provides guard pages between the maps */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL
+# define KM_MAX_IDX 33
+#else
+# define KM_MAX_IDX 16
+#endif
+
+#endif
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
deleted file mode 100644
index 9f95b7b63d19..000000000000
--- a/include/asm-generic/kmap_types.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_KMAP_TYPES_H
-#define _ASM_GENERIC_KMAP_TYPES_H
-
-#ifdef __WITH_KM_FENCE
-# define KM_TYPE_NR 41
-#else
-# define KM_TYPE_NR 20
-#endif
-
-#endif
diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h
index 4a982089c95c..060eab094e5a 100644
--- a/include/asm-generic/kprobes.h
+++ b/include/asm-generic/kprobes.h
@@ -10,11 +10,11 @@
*/
# define __NOKPROBE_SYMBOL(fname) \
static unsigned long __used \
- __attribute__((__section__("_kprobe_blacklist"))) \
+ __section("_kprobe_blacklist") \
_kbl_addr_##fname = (unsigned long)fname;
# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
/* Use this to forbid a kprobes attach on very low level functions */
-# define __kprobes __attribute__((__section__(".kprobes.text")))
+# define __kprobes __section(".kprobes.text")
# define nokprobe_inline __always_inline
#else
# define NOKPROBE_SYMBOL(fname)
diff --git a/include/asm-generic/kvm_types.h b/include/asm-generic/kvm_types.h
new file mode 100644
index 000000000000..2a82daf110f1
--- /dev/null
+++ b/include/asm-generic/kvm_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KVM_TYPES_H
+#define _ASM_GENERIC_KVM_TYPES_H
+
+#endif
diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h
new file mode 100644
index 000000000000..8a59b6e567df
--- /dev/null
+++ b/include/asm-generic/logic_io.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef _LOGIC_IO_H
+#define _LOGIC_IO_H
+#include <linux/types.h>
+
+/* include this file into asm/io.h */
+
+#ifdef CONFIG_INDIRECT_IOMEM
+
+#ifdef CONFIG_INDIRECT_IOMEM_FALLBACK
+/*
+ * If you want emulated IO memory to fall back to 'normal' IO memory
+ * if a region wasn't registered as emulated, then you need to have
+ * all of the real_* functions implemented.
+ */
+#if !defined(real_ioremap) || !defined(real_iounmap) || \
+ !defined(real_raw_readb) || !defined(real_raw_writeb) || \
+ !defined(real_raw_readw) || !defined(real_raw_writew) || \
+ !defined(real_raw_readl) || !defined(real_raw_writel) || \
+ (defined(CONFIG_64BIT) && \
+ (!defined(real_raw_readq) || !defined(real_raw_writeq))) || \
+ !defined(real_memset_io) || \
+ !defined(real_memcpy_fromio) || \
+ !defined(real_memcpy_toio)
+#error "Must provide fallbacks for real IO memory access"
+#endif /* defined ... */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+
+#define ioremap ioremap
+void __iomem *ioremap(phys_addr_t offset, size_t size);
+
+#define iounmap iounmap
+void iounmap(void volatile __iomem *addr);
+
+#define __raw_readb __raw_readb
+u8 __raw_readb(const volatile void __iomem *addr);
+
+#define __raw_readw __raw_readw
+u16 __raw_readw(const volatile void __iomem *addr);
+
+#define __raw_readl __raw_readl
+u32 __raw_readl(const volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+u64 __raw_readq(const volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define __raw_writeb __raw_writeb
+void __raw_writeb(u8 value, volatile void __iomem *addr);
+
+#define __raw_writew __raw_writew
+void __raw_writew(u16 value, volatile void __iomem *addr);
+
+#define __raw_writel __raw_writel
+void __raw_writel(u32 value, volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+void __raw_writeq(u64 value, volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define memset_io memset_io
+void memset_io(volatile void __iomem *addr, int value, size_t size);
+
+#define memcpy_fromio memcpy_fromio
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size);
+
+#define memcpy_toio memcpy_toio
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size);
+
+#endif /* CONFIG_INDIRECT_IOMEM */
+#endif /* _LOGIC_IO_H */
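Illustrative sketch (not part of the patch): an architecture enabling CONFIG_INDIRECT_IOMEM_FALLBACK satisfies the #error check above by mapping the real_* names onto its native accessors before this header is pulled in; the native_* names are hypothetical placeholders.

	/* in the arch's asm/io.h, ahead of asm-generic/logic_io.h */
	#define real_ioremap(phys, size)	native_ioremap(phys, size)
	#define real_iounmap(addr)		native_iounmap(addr)
	#define real_raw_readb(addr)		native_readb(addr)
	#define real_raw_writeb(val, addr)	native_writeb(val, addr)
	/* ...likewise for the w/l(/q) variants, real_memset_io and real_memcpy_{from,to}io */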
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 7637fb46ba4f..a2c8ed60233a 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -6,47 +6,18 @@
#ifndef __ASSEMBLY__
+/*
+ * supports 3 memory models.
+ */
#if defined(CONFIG_FLATMEM)
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (0UL)
#endif
-#elif defined(CONFIG_DISCONTIGMEM)
-
-#ifndef arch_pfn_to_nid
-#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn)
-#endif
-
-#ifndef arch_local_page_offset
-#define arch_local_page_offset(pfn, nid) \
- ((pfn) - NODE_DATA(nid)->node_start_pfn)
-#endif
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-/*
- * supports 3 memory models.
- */
-#if defined(CONFIG_FLATMEM)
-
#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
-#elif defined(CONFIG_DISCONTIGMEM)
-
-#define __pfn_to_page(pfn) \
-({ unsigned long __pfn = (pfn); \
- unsigned long __nid = arch_pfn_to_nid(__pfn); \
- NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
-})
-
-#define __page_to_pfn(pg) \
-({ const struct page *__pg = (pg); \
- struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
- (unsigned long)(__pg - __pgdat->node_mem_map) + \
- __pgdat->node_start_pfn; \
-})
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -70,7 +41,7 @@
struct mem_section *__sec = __pfn_to_section(__pfn); \
__section_mem_map_addr(__sec) + __pfn; \
})
-#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+#endif /* CONFIG_FLATMEM/SPARSEMEM */
/*
* Convert a physical address to a Page Frame Number and back
diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h
deleted file mode 100644
index 5ff0e5193f85..000000000000
--- a/include/asm-generic/mm-arch-hooks.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Architecture specific mm hooks
- */
-
-#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
-#define _ASM_GENERIC_MM_ARCH_HOOKS_H
-
-/*
- * This file should be included through arch/../include/asm/Kbuild for
- * the architecture which doesn't need specific mm hooks.
- *
- * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
- * are used.
- */
-
-#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h
index 9439ff037b2d..5698fca3bf56 100644
--- a/include/asm-generic/mmiowb.h
+++ b/include/asm-generic/mmiowb.h
@@ -27,7 +27,7 @@
#include <asm/smp.h>
DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
-#define __mmiowb_state() this_cpu_ptr(&__mmiowb_state)
+#define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state)
#else
#define __mmiowb_state() arch_mmiowb_state()
#endif /* arch_mmiowb_state */
@@ -35,7 +35,9 @@ DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
static inline void mmiowb_set_pending(void)
{
struct mmiowb_state *ms = __mmiowb_state();
- ms->mmiowb_pending = ms->nesting_count;
+
+ if (likely(ms->nesting_count))
+ ms->mmiowb_pending = ms->nesting_count;
}
static inline void mmiowb_spin_lock(void)
diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
index 6be9106fb6fb..91727065bacb 100644
--- a/include/asm-generic/mmu_context.h
+++ b/include/asm-generic/mmu_context.h
@@ -3,44 +3,74 @@
#define __ASM_GENERIC_MMU_CONTEXT_H
/*
- * Generic hooks for NOMMU architectures, which do not need to do
- * anything special here.
+ * Generic hooks to implement no-op functionality.
*/
-#include <asm-generic/mm_hooks.h>
-
struct task_struct;
struct mm_struct;
+/*
+ * enter_lazy_tlb - Called when "tsk" is about to enter lazy TLB mode.
+ *
+ * @mm: the currently active mm context which is becoming lazy
+ * @tsk: task which is entering lazy tlb
+ *
+ * tsk->mm will be NULL
+ */
+#ifndef enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
}
+#endif
+/**
+ * init_new_context - Initialize context of a new mm_struct.
+ * @tsk: task struct for the mm
+ * @mm: the new mm struct
+ * @return: 0 on success, -errno on failure
+ */
+#ifndef init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
return 0;
}
+#endif
+/**
+ * destroy_context - Undo init_new_context when the mm is going away
+ * @mm: old mm struct
+ */
+#ifndef destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
}
+#endif
-static inline void deactivate_mm(struct task_struct *task,
- struct mm_struct *mm)
-{
-}
-
-static inline void switch_mm(struct mm_struct *prev,
- struct mm_struct *next,
- struct task_struct *tsk)
+/**
+ * activate_mm - called after exec switches the current task to a new mm, to switch to it
+ * @prev_mm: previous mm of this task
+ * @next_mm: new mm
+ */
+#ifndef activate_mm
+static inline void activate_mm(struct mm_struct *prev_mm,
+ struct mm_struct *next_mm)
{
+ switch_mm(prev_mm, next_mm, current);
}
+#endif
-static inline void activate_mm(struct mm_struct *prev_mm,
- struct mm_struct *next_mm)
+/**
+ * deactivate_mm - called when an mm is released after exit or exec switches away from it
+ * @tsk: the task
+ * @mm: the old mm
+ */
+#ifndef deactivate_mm
+static inline void deactivate_mm(struct task_struct *tsk,
+ struct mm_struct *mm)
{
}
+#endif
#endif /* __ASM_GENERIC_MMU_CONTEXT_H */
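Illustrative sketch (not part of the patch): with every hook now wrapped in #ifndef, an architecture's asm/mmu_context.h supplies only what it needs and inherits the remaining no-ops; foo_alloc_context() is a hypothetical helper.

	#define init_new_context init_new_context
	static inline int init_new_context(struct task_struct *tsk,
					   struct mm_struct *mm)
	{
		return foo_alloc_context(mm);	/* arch-specific setup */
	}

	#include <asm-generic/mmu_context.h>	/* enter_lazy_tlb(), destroy_context(), ... stay no-ops */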
diff --git a/include/asm-generic/module.lds.h b/include/asm-generic/module.lds.h
new file mode 100644
index 000000000000..f210d5c1b78b
--- /dev/null
+++ b/include/asm-generic/module.lds.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GENERIC_MODULE_LDS_H
+#define __ASM_GENERIC_MODULE_LDS_H
+
+/*
+ * <asm/module.lds.h> can specify arch-specific sections for linking modules.
+ * Empty for the asm-generic header.
+ */
+
+#endif /* __ASM_GENERIC_MODULE_LDS_H */
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index b3f1082cc435..bfb9eb9d7215 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -22,38 +22,99 @@
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
+#include <linux/nmi.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>
struct ms_hyperv_info {
u32 features;
+ u32 priv_high;
u32 misc_features;
u32 hints;
u32 nested_features;
u32 max_vp_index;
u32 max_lp_index;
+ u32 isolation_config_a;
+ union {
+ u32 isolation_config_b;
+ struct {
+ u32 cvm_type : 4;
+ u32 reserved1 : 1;
+ u32 shared_gpa_boundary_active : 1;
+ u32 shared_gpa_boundary_bits : 6;
+ u32 reserved2 : 20;
+ };
+ };
+ u64 shared_gpa_boundary;
};
extern struct ms_hyperv_info ms_hyperv;
+extern void * __percpu *hyperv_pcpu_input_arg;
+extern void * __percpu *hyperv_pcpu_output_arg;
+
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+extern bool hv_isolation_type_snp(void);
+
+/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
+static inline int hv_result(u64 status)
+{
+ return status & HV_HYPERCALL_RESULT_MASK;
+}
+static inline bool hv_result_success(u64 status)
+{
+ return hv_result(status) == HV_STATUS_SUCCESS;
+}
+
+static inline unsigned int hv_repcomp(u64 status)
+{
+ /* Bits [43:32] of status have 'Reps completed' data. */
+ return (status & HV_HYPERCALL_REP_COMP_MASK) >>
+ HV_HYPERCALL_REP_COMP_OFFSET;
+}
+
+/*
+ * Rep hypercalls. Callers of these functions are supposed to ensure that
+ * rep_count and varhead_size comply with the Hyper-V hypercall definition.
+ */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+ void *input, void *output)
+{
+ u64 control = code;
+ u64 status;
+ u16 rep_comp;
+
+ control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
+ control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+
+ do {
+ status = hv_do_hypercall(control, input, output);
+ if (!hv_result_success(status))
+ return status;
+
+ rep_comp = hv_repcomp(status);
+
+ control &= ~HV_HYPERCALL_REP_START_MASK;
+ control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
+
+ touch_nmi_watchdog();
+ } while (rep_comp < rep_count);
+
+ return status;
+}
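Illustrative sketch (not part of the patch): a caller passes the full rep_count once and the loop above resumes from the "reps completed" field after partial completions. HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST and struct hv_tlb_flush are assumed from hyperv-tlfs.h; the wrapper name is hypothetical.

	static u64 foo_flush_gva_list(struct hv_tlb_flush *flush, u16 gva_count)
	{
		u64 status;

		status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
					     gva_count, 0, flush, NULL);
		if (!hv_result_success(status))
			pr_err("TLB flush failed: 0x%x\n", hv_result(status));
		return status;
	}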
/* Generate the guest OS identifier as described in the Hyper-V TLFS */
-static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
- __u64 d_info2)
+static inline u64 hv_generate_guest_id(u64 kernel_version)
{
- __u64 guest_id = 0;
+ u64 guest_id;
- guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
- guest_id |= (d_info1 << 48);
+ guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48);
guest_id |= (kernel_version << 16);
- guest_id |= d_info2;
return guest_id;
}
-
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
@@ -85,20 +146,25 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
* possibly deliver another msg from the
* hypervisor
*/
- hv_signal_eom();
+ hv_set_register(HV_REGISTER_EOM, 0);
}
}
-void hv_setup_vmbus_irq(void (*handler)(void));
-void hv_remove_vmbus_irq(void);
-void hv_enable_vmbus_irq(void);
-void hv_disable_vmbus_irq(void);
+void hv_setup_vmbus_handler(void (*handler)(void));
+void hv_remove_vmbus_handler(void);
+void hv_setup_stimer0_handler(void (*handler)(void));
+void hv_remove_stimer0_handler(void);
void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
+extern int vmbus_interrupt;
+extern int vmbus_irq;
+
+extern bool hv_root_partition;
+
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Hypervisor's notion of virtual processor ID is different from
@@ -109,9 +175,20 @@ void hv_remove_crash_handler(void);
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
+extern u64 (*hv_read_reference_counter)(void);
+
/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL U32_MAX
+int __init hv_common_init(void);
+void __init hv_common_free(void);
+int hv_common_cpu_init(unsigned int cpu);
+int hv_common_cpu_die(unsigned int cpu);
+
+void *hv_alloc_hyperv_page(void);
+void *hv_alloc_hyperv_zeroed_page(void);
+void hv_free_hyperv_page(unsigned long addr);
+
/**
* hv_cpu_number_to_vp_number() - Map CPU to VP.
* @cpu_number: CPU number in Linux terms
@@ -128,10 +205,12 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
return hv_vp_index[cpu_number];
}
-static inline int cpumask_to_vpset(struct hv_vpset *vpset,
- const struct cpumask *cpus)
+static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus,
+ bool exclude_self)
{
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+ int this_cpu = smp_processor_id();
/* valid_bank_mask can represent up to 64 banks */
if (hv_max_vp_index / 64 >= 64)
@@ -149,6 +228,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
* Some banks may end up being empty but this is acceptable.
*/
for_each_cpu(cpu, cpus) {
+ if (exclude_self && cpu == this_cpu)
+ continue;
vcpu = hv_cpu_number_to_vp_number(cpu);
if (vcpu == VP_INVAL)
return -1;
@@ -163,21 +244,40 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
return nr_bank;
}
-void hyperv_report_panic(struct pt_regs *regs, long err);
-void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus)
+{
+ return __cpumask_to_vpset(vpset, cpus, false);
+}
+
+static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
+ const struct cpumask *cpus)
+{
+ WARN_ON_ONCE(preemptible());
+ return __cpumask_to_vpset(vpset, cpus, true);
+}
+
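Illustrative sketch (not part of the patch): callers use the returned bank count to size the variable header of the hypercall and treat a negative return (an uninitialized VP index in the mask) as a cue to fall back to a non-vpset path. The flush structure layout comes from hyperv-tlfs.h; the wrapper name is hypothetical.

	static int foo_fill_vpset(struct hv_tlb_flush_ex *flush,
				  const struct cpumask *cpus)
	{
		int nr_bank;

		flush->hv_vp_set.valid_bank_mask = 0;
		nr_bank = cpumask_to_vpset(&flush->hv_vp_set, cpus);
		if (nr_bank < 0)
			return -EINVAL;		/* e.g. fall back to flushing all processors */
		return nr_bank;			/* banks contributing to varhead_size */
	}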
+void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
+enum hv_isolation_type hv_get_isolation_type(void);
+bool hv_is_isolation_supported(void);
+bool hv_isolation_type_snp(void);
+u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
void hyperv_cleanup(void);
-void hv_setup_sched_clock(void *sched_clock);
+bool hv_query_ext_cap(u64 cap_query);
+void hv_setup_dma_ops(struct device *dev, bool coherent);
+void *hv_map_memory(void *addr, unsigned long size);
+void hv_unmap_memory(void *addr);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
+static inline bool hv_is_isolation_supported(void) { return false; }
+static inline enum hv_isolation_type hv_get_isolation_type(void)
+{
+ return HV_ISOLATION_TYPE_NONE;
+}
#endif /* CONFIG_HYPERV */
-#if IS_ENABLED(CONFIG_HYPERV)
-extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
-extern void hv_remove_stimer0_irq(int irq);
-#endif
-
#endif
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
index e6795f088bdd..bf910d47e900 100644
--- a/include/asm-generic/msi.h
+++ b/include/asm-generic/msi.h
@@ -4,6 +4,8 @@
#include <linux/types.h>
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+
#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2
#endif
@@ -22,12 +24,18 @@ struct msi_desc;
typedef struct msi_alloc_info {
struct msi_desc *desc;
irq_hw_number_t hwirq;
+ unsigned long flags;
union {
unsigned long ul;
void *ptr;
} scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS];
} msi_alloc_info_t;
+/* Device generating MSIs is proxying for another device */
+#define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0)
+
#define GENERIC_MSI_DOMAIN_OPS 1
+#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
+
#endif
diff --git a/include/asm-generic/nommu_context.h b/include/asm-generic/nommu_context.h
new file mode 100644
index 000000000000..4f916f9e16cd
--- /dev/null
+++ b/include/asm-generic/nommu_context.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_NOMMU_H
+#define __ASM_GENERIC_NOMMU_H
+
+/*
+ * Generic hooks for NOMMU architectures, which do not need to do
+ * anything special here.
+ */
+#include <asm-generic/mm_hooks.h>
+
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* __ASM_GENERIC_NOMMU_H */
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
new file mode 100644
index 000000000000..1a3ad6d29833
--- /dev/null
+++ b/include/asm-generic/numa.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_NUMA_H
+#define __ASM_GENERIC_NUMA_H
+
+#ifdef CONFIG_NUMA
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
+
+int __node_distance(int from, int to);
+#define node_distance(a, b) __node_distance(a, b)
+
+extern nodemask_t numa_nodes_parsed __initdata;
+
+extern bool numa_off;
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+void numa_clear_node(unsigned int cpu);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
+ return node_to_cpumask_map[node];
+}
+#endif
+
+void __init arch_numa_init(void);
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_free_distance(void);
+void __init early_map_cpu_to_node(unsigned int cpu, int nid);
+void numa_store_cpu_info(unsigned int cpu);
+void numa_add_cpu(unsigned int cpu);
+void numa_remove_cpu(unsigned int cpu);
+
+#else /* CONFIG_NUMA */
+
+static inline void numa_store_cpu_info(unsigned int cpu) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
+static inline void arch_numa_init(void) { }
+static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
+
+#endif /* CONFIG_NUMA */
+
+#endif /* __ASM_GENERIC_NUMA_H */
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index fe801f01625e..6fc47561814c 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -63,11 +63,7 @@ extern unsigned long memory_end;
#endif /* !__ASSEMBLY__ */
-#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS
-#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS)
-#else
#define PAGE_OFFSET (0)
-#endif
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index 6bb3cd3d695a..6869f1061528 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -1,17 +1,30 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/include/asm-generic/pci.h
- *
- * Copyright (C) 2003 Russell King
- */
-#ifndef _ASM_GENERIC_PCI_H
-#define _ASM_GENERIC_PCI_H
+/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+#ifndef __ASM_GENERIC_PCI_H
+#define __ASM_GENERIC_PCI_H
+
+#ifndef PCIBIOS_MIN_IO
+#define PCIBIOS_MIN_IO 0
+#endif
+
+#ifndef PCIBIOS_MIN_MEM
+#define PCIBIOS_MIN_MEM 0
+#endif
+
+#ifndef pcibios_assign_all_busses
+/* For bootloaders that do not initialize the PCI bus */
+#define pcibios_assign_all_busses() 1
+#endif
+
+/* Enable generic resource mapping code in drivers/pci/ */
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_proc_domain(struct pci_bus *bus)
{
- return channel ? 15 : 14;
+ /* always show the domain in /proc */
+ return 1;
}
-#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+#endif /* CONFIG_PCI_DOMAINS */
-#endif /* _ASM_GENERIC_PCI_H */
+#endif /* __ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index d4f16dcc2ed7..8fbb0a55545d 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -18,12 +18,15 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
unsigned int nr);
+#elif !defined(CONFIG_HAS_IOPORT_MAP)
+#define __pci_ioport_map(dev, port, nr) NULL
#else
#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
#endif
@@ -50,6 +53,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
{
return NULL;
}
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
#endif
-#endif /* __ASM_GENERIC_IO_H */
+#endif /* __ASM_GENERIC_PCI_IOMAP_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 35e4a53b83e6..6432a7fade91 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -114,21 +114,21 @@ do { \
#define __this_cpu_generic_read_nopreempt(pcp) \
({ \
- typeof(pcp) __ret; \
+ typeof(pcp) ___ret; \
preempt_disable_notrace(); \
- __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
+ ___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
preempt_enable_notrace(); \
- __ret; \
+ ___ret; \
})
#define __this_cpu_generic_read_noirq(pcp) \
({ \
- typeof(pcp) __ret; \
- unsigned long __flags; \
- raw_local_irq_save(__flags); \
- __ret = raw_cpu_generic_read(pcp); \
- raw_local_irq_restore(__flags); \
- __ret; \
+ typeof(pcp) ___ret; \
+ unsigned long ___flags; \
+ raw_local_irq_save(___flags); \
+ ___ret = raw_cpu_generic_read(pcp); \
+ raw_local_irq_restore(___flags); \
+ ___ret; \
})
#define this_cpu_generic_read(pcp) \
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 73f7421413cb..977bea16cf1b 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -102,6 +102,98 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
__free_page(pte_page);
}
+
+#if CONFIG_PGTABLE_LEVELS > 2
+
+#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
+/**
+ * pmd_alloc_one - allocate a page for PMD-level page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page and runs the pgtable_pmd_page_ctor().
+ * Allocations use %GFP_PGTABLE_USER in user context and
+ * %GFP_PGTABLE_KERNEL in kernel context.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ struct page *page;
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ page = alloc_pages(gfp, 0);
+ if (!page)
+ return NULL;
+ if (!pgtable_pmd_page_ctor(page)) {
+ __free_pages(page, 0);
+ return NULL;
+ }
+ return (pmd_t *)page_address(page);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMD_FREE
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
+ free_page((unsigned long)pmd);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
+
+#if CONFIG_PGTABLE_LEVELS > 3
+
+static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ return (pud_t *)get_zeroed_page(gfp);
+}
+
+#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
+/**
+ * pud_alloc_one - allocate a page for PUD-level page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page using %GFP_PGTABLE_USER for user context and
+ * %GFP_PGTABLE_KERNEL for kernel context.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return __pud_alloc_one(mm, addr);
+}
+#endif
+
+static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ free_page((unsigned long)pud);
+}
+
+#ifndef __HAVE_ARCH_PUD_FREE
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ __pud_free(mm, pud);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+
+#ifndef __HAVE_ARCH_PGD_FREE
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+#endif
+
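Illustrative sketch (not part of the patch): an architecture that only needs extra bookkeeping can claim one hook with __HAVE_ARCH_PUD_ALLOC_ONE and still call the generic __pud_alloc_one() helper; foo_account_pud() is a hypothetical accounting hook.

	/* in a hypothetical arch/foo/include/asm/pgalloc.h */
	#define __HAVE_ARCH_PUD_ALLOC_ONE
	#include <asm-generic/pgalloc.h>

	static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
	{
		pud_t *pud = __pud_alloc_one(mm, addr);

		if (pud)
			foo_account_pud(mm);	/* arch-specific accounting */
		return pud;
	}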
#endif /* CONFIG_MMU */
#endif /* __ASM_GENERIC_PGALLOC_H */
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
deleted file mode 100644
index 829bdb0d6327..000000000000
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _PGTABLE_NOP4D_HACK_H
-#define _PGTABLE_NOP4D_HACK_H
-
-#ifndef __ASSEMBLY__
-#include <asm-generic/5level-fixup.h>
-
-#define __PAGETABLE_PUD_FOLDED 1
-
-/*
- * Having the pud type consist of a pgd gets the size right, and allows
- * us to conceptually access the pgd entry that this pud is folded into
- * without casting.
- */
-typedef struct { pgd_t pgd; } pud_t;
-
-#define PUD_SHIFT PGDIR_SHIFT
-#define PTRS_PER_PUD 1
-#define PUD_SIZE (1UL << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE-1))
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pud is never bad, and a pud always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline void pgd_clear(pgd_t *pgd) { }
-#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
-
-#define pgd_populate(mm, pgd, pud) do { } while (0)
-#define pgd_populate_safe(mm, pgd, pud) do { } while (0)
-/*
- * (puds are folded into pgds so this doesn't get actually called,
- * but the define is needed for a generic inline function.)
- */
-#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
-
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
-{
- return (pud_t *)pgd;
-}
-
-#define pud_val(x) (pgd_val((x).pgd))
-#define __pud(x) ((pud_t) { __pgd(x) })
-
-#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
-#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
-
-/*
- * allocating and freeing a pud is trivial: the 1-entry pud is
- * inside the pgd, so has no extra memory associated with it.
- */
-#define pud_alloc_one(mm, address) NULL
-#define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x, a) do { } while (0)
-
-#undef pud_addr_end
-#define pud_addr_end(addr, end) (end)
-
-#endif /* __ASSEMBLY__ */
-#endif /* _PGTABLE_NOP4D_HACK_H */
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index ce2cbb3c380f..03b7dae47dd4 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -9,7 +9,6 @@
typedef struct { pgd_t pgd; } p4d_t;
#define P4D_SHIFT PGDIR_SHIFT
-#define MAX_PTRS_PER_P4D 1
#define PTRS_PER_P4D 1
#define P4D_SIZE (1UL << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE-1))
@@ -42,7 +41,7 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
#define __p4d(x) ((p4d_t) { __pgd(x) })
#define pgd_page(pgd) (p4d_page((p4d_t){ pgd }))
-#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd }))
+#define pgd_page_vaddr(pgd) ((unsigned long)(p4d_pgtable((p4d_t){ pgd })))
/*
* allocating and freeing a p4d is trivial: the 1-entry p4d is
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 0d9b28cba16d..8ffd64e7a24c 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -30,6 +30,8 @@ typedef struct { pud_t pud; } pmd_t;
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
+static inline int pud_user(pud_t pud) { return 0; }
+static inline int pud_leaf(pud_t pud) { return 0; }
static inline void pud_clear(pud_t *pud) { }
#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
@@ -45,12 +47,13 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
{
return (pmd_t *)pud;
}
+#define pmd_offset pmd_offset
#define pmd_val(x) (pud_val((x).pud))
#define __pmd(x) ((pmd_t) { __pud(x) } )
#define pud_page(pud) (pmd_page((pmd_t){ pud }))
-#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
+#define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud })))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index d3776cb494c0..eb70c6d7ceff 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -4,9 +4,6 @@
#ifndef __ASSEMBLY__
-#ifdef __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nop4d-hack.h>
-#else
#include <asm-generic/pgtable-nop4d.h>
#define __PAGETABLE_PUD_FOLDED 1
@@ -46,12 +43,13 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
return (pud_t *)p4d;
}
+#define pud_offset pud_offset
#define pud_val(x) (p4d_val((x).p4d))
#define __pud(x) ((pud_t) { __p4d(x) })
#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
-#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d }))
+#define p4d_pgtable(p4d) ((pud_t *)(pud_pgtable((pud_t){ p4d })))
/*
* allocating and freeing a pud is trivial: the 1-entry pud is
@@ -65,5 +63,4 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
#define pud_addr_end(addr, end) (end)
#endif /* __ASSEMBLY__ */
-#endif /* !__ARCH_USE_5LEVEL_HACK */
#endif /* _PGTABLE_NOPUD_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
deleted file mode 100644
index e2e2bef07dd2..000000000000
--- a/include/asm-generic/pgtable.h
+++ /dev/null
@@ -1,1261 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_PGTABLE_H
-#define _ASM_GENERIC_PGTABLE_H
-
-#include <linux/pfn.h>
-
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_MMU
-
-#include <linux/mm_types.h>
-#include <linux/bug.h>
-#include <linux/errno.h>
-
-#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
- defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
-#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
-#endif
-
-/*
- * On almost all architectures and configurations, 0 can be used as the
- * upper ceiling to free_pgtables(): on many architectures it has the same
- * effect as using TASK_SIZE. However, there is one configuration which
- * must impose a more careful limit, to avoid freeing kernel pgtables.
- */
-#ifndef USER_PGTABLES_CEILING
-#define USER_PGTABLES_CEILING 0UL
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
- pte_t entry, int dirty);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern int pmdp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp,
- pmd_t entry, int dirty);
-extern int pudp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pud_t *pudp,
- pud_t entry, int dirty);
-#else
-static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp,
- pmd_t entry, int dirty)
-{
- BUILD_BUG();
- return 0;
-}
-static inline int pudp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pud_t *pudp,
- pud_t entry, int dirty)
-{
- BUILD_BUG();
- return 0;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
-{
- pte_t pte = *ptep;
- int r = 1;
- if (!pte_young(pte))
- r = 0;
- else
- set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
- return r;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- pmd_t pmd = *pmdp;
- int r = 1;
- if (!pmd_young(pmd))
- r = 0;
- else
- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
- return r;
-}
-#else
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- BUILD_BUG();
- return 0;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-int ptep_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
-#else
-/*
- * Despite relevant to THP only, this API is called from generic rmap code
- * under PageTransHuge(), hence needs a dummy implementation for !THP
- */
-static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
-{
- BUILD_BUG();
- return 0;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep)
-{
- pte_t pte = *ptep;
- pte_clear(mm, address, ptep);
- return pte;
-}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
-static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmdp)
-{
- pmd_t pmd = *pmdp;
- pmd_clear(pmdp);
- return pmd;
-}
-#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
-#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
-static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pud_t *pudp)
-{
- pud_t pud = *pudp;
-
- pud_clear(pudp);
- return pud;
-}
-#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
-static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp,
- int full)
-{
- return pmdp_huge_get_and_clear(mm, address, pmdp);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
-static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
- unsigned long address, pud_t *pudp,
- int full)
-{
- return pudp_huge_get_and_clear(mm, address, pudp);
-}
-#endif
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
- unsigned long address, pte_t *ptep,
- int full)
-{
- pte_t pte;
- pte = ptep_get_and_clear(mm, address, ptep);
- return pte;
-}
-#endif
-
-/*
- * Some architectures may be able to avoid expensive synchronization
- * primitives when modifications are made to PTEs which are already
- * not present, or during address space destruction.
- */
-#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
-static inline void pte_clear_not_present_full(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep,
- int full)
-{
- pte_clear(mm, address, ptep);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
-extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp);
-extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
- unsigned long address,
- pud_t *pudp);
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
-struct mm_struct;
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
-{
- pte_t old_pte = *ptep;
- set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
-}
-#endif
-
-#ifndef pte_savedwrite
-#define pte_savedwrite pte_write
-#endif
-
-#ifndef pte_mk_savedwrite
-#define pte_mk_savedwrite pte_mkwrite
-#endif
-
-#ifndef pte_clear_savedwrite
-#define pte_clear_savedwrite pte_wrprotect
-#endif
-
-#ifndef pmd_savedwrite
-#define pmd_savedwrite pmd_write
-#endif
-
-#ifndef pmd_mk_savedwrite
-#define pmd_mk_savedwrite pmd_mkwrite
-#endif
-
-#ifndef pmd_clear_savedwrite
-#define pmd_clear_savedwrite pmd_wrprotect
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline void pmdp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
-{
- pmd_t old_pmd = *pmdp;
- set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
-}
-#else
-static inline void pmdp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
-{
- BUILD_BUG();
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline void pudp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pud_t *pudp)
-{
- pud_t old_pud = *pudp;
-
- set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
-}
-#else
-static inline void pudp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pud_t *pudp)
-{
- BUILD_BUG();
-}
-#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
-#endif
-
-#ifndef pmdp_collapse_flush
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
-#else
-static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- BUILD_BUG();
- return *pmdp;
-}
-#define pmdp_collapse_flush pmdp_collapse_flush
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pgtable);
-#endif
-
-#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * This is an implementation of pmdp_establish() that is only suitable for an
- * architecture that doesn't have hardware dirty/accessed bits. In this case we
- * can't race with a CPU setting these bits, so a non-atomic approach is fine.
- */
-static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp, pmd_t pmd)
-{
- pmd_t old_pmd = *pmdp;
- set_pmd_at(vma->vm_mm, address, pmdp, pmd);
- return old_pmd;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_INVALIDATE
-extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmdp);
-#endif
-
-#ifndef __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t pte_a, pte_t pte_b)
-{
- return pte_val(pte_a) == pte_val(pte_b);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTE_UNUSED
-/*
- * Some architectures provide facilities to virtualization guests
- * so that they can flag allocated pages as unused. This allows the
- * host to transparently reclaim unused pages. This function returns
- * whether the pte's page is unused.
- */
-static inline int pte_unused(pte_t pte)
-{
- return 0;
-}
-#endif
-
-#ifndef pte_access_permitted
-#define pte_access_permitted(pte, write) \
- (pte_present(pte) && (!(write) || pte_write(pte)))
-#endif
-
-#ifndef pmd_access_permitted
-#define pmd_access_permitted(pmd, write) \
- (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
-#endif
-
-#ifndef pud_access_permitted
-#define pud_access_permitted(pud, write) \
- (pud_present(pud) && (!(write) || pud_write(pud)))
-#endif
-
-#ifndef p4d_access_permitted
-#define p4d_access_permitted(p4d, write) \
- (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
-#endif
-
-#ifndef pgd_access_permitted
-#define pgd_access_permitted(pgd, write) \
- (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
-#endif
-
-#ifndef __HAVE_ARCH_PMD_SAME
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
- return pmd_val(pmd_a) == pmd_val(pmd_b);
-}
-
-static inline int pud_same(pud_t pud_a, pud_t pud_b)
-{
- return pud_val(pud_a) == pud_val(pud_b);
-}
-#endif
-
-#ifndef __HAVE_ARCH_P4D_SAME
-static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
-{
- return p4d_val(p4d_a) == p4d_val(p4d_b);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PGD_SAME
-static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
-{
- return pgd_val(pgd_a) == pgd_val(pgd_b);
-}
-#endif
-
-/*
- * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
- * TLB flush will be required as a result of the "set". For example, use
- * in scenarios where it is known ahead of time that the routine is
- * setting non-present entries, or re-setting an existing entry to the
- * same value. Otherwise, use the typical "set" helpers and flush the
- * TLB.
- */
-#define set_pte_safe(ptep, pte) \
-({ \
- WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
- set_pte(ptep, pte); \
-})
-
-#define set_pmd_safe(pmdp, pmd) \
-({ \
- WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
- set_pmd(pmdp, pmd); \
-})
-
-#define set_pud_safe(pudp, pud) \
-({ \
- WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
- set_pud(pudp, pud); \
-})
-
-#define set_p4d_safe(p4dp, p4d) \
-({ \
- WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
- set_p4d(p4dp, p4d); \
-})
-
-#define set_pgd_safe(pgdp, pgd) \
-({ \
- WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
- set_pgd(pgdp, pgd); \
-})
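A minimal usage sketch (illustrative, not taken from this patch): populating an entry that is known to be either absent or already identical, so the TLB flush can be skipped.

/* Sketch: (re)install a pte that cannot differ from a live entry. */
static void __init example_populate_pte(pte_t *ptep, unsigned long pfn,
					pgprot_t prot)
{
	/* WARN_ON_ONCE() fires if this would silently change a present pte. */
	set_pte_safe(ptep, pfn_pte(pfn, prot));
}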
-
-#ifndef __HAVE_ARCH_DO_SWAP_PAGE
-/*
- * Some architectures support metadata associated with a page. When a
- * page is being swapped out, this metadata must be saved so it can be
- * restored when the page is swapped back in. SPARC M7 and newer
- * processors support an ADI (Application Data Integrity) tag for the
- * page as metadata for the page. arch_do_swap_page() can restore this
- * metadata when a page is swapped back in.
- */
-static inline void arch_do_swap_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr,
- pte_t pte, pte_t oldpte)
-{
-
-}
-#endif
-
-#ifndef __HAVE_ARCH_UNMAP_ONE
-/*
- * Some architectures support metadata associated with a page. When a
- * page is being swapped out, this metadata must be saved so it can be
- * restored when the page is swapped back in. SPARC M7 and newer
- * processors support an ADI (Application Data Integrity) tag for the
- * page as metadata for the page. arch_unmap_one() can save this
- * metadata on a swap-out of a page.
- */
-static inline int arch_unmap_one(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr,
- pte_t orig_pte)
-{
- return 0;
-}
-#endif
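Purely hypothetical sketch of how an architecture might pair the two hooks above; example_save_tags() and example_restore_tags() are made-up names standing in for per-page metadata handling (such as SPARC ADI tags).

/*
 * Hypothetical override, e.g. in an arch's asm/pgtable.h with
 * __HAVE_ARCH_UNMAP_ONE and __HAVE_ARCH_DO_SWAP_PAGE defined.
 */
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr, pte_t orig_pte)
{
	return example_save_tags(mm, addr, orig_pte);	/* made-up helper */
}

static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{
	example_restore_tags(mm, addr, pte);		/* made-up helper */
}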
-
-#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
-#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
-#endif
-
-#ifndef __HAVE_ARCH_MOVE_PTE
-#define move_pte(pte, prot, old_addr, new_addr) (pte)
-#endif
-
-#ifndef pte_accessible
-# define pte_accessible(mm, pte) ((void)(pte), 1)
-#endif
-
-#ifndef flush_tlb_fix_spurious_fault
-#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
-#endif
-
-#ifndef pgprot_noncached
-#define pgprot_noncached(prot) (prot)
-#endif
-
-#ifndef pgprot_writecombine
-#define pgprot_writecombine pgprot_noncached
-#endif
-
-#ifndef pgprot_writethrough
-#define pgprot_writethrough pgprot_noncached
-#endif
-
-#ifndef pgprot_device
-#define pgprot_device pgprot_noncached
-#endif
-
-#ifndef pgprot_modify
-#define pgprot_modify pgprot_modify
-static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
-{
- if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
- newprot = pgprot_noncached(newprot);
- if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
- newprot = pgprot_writecombine(newprot);
- if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
- newprot = pgprot_device(newprot);
- return newprot;
-}
-#endif
-
-/*
- * When walking page tables, get the address of the next boundary,
- * or the end address of the range if that comes earlier. Although no
- * vma end wraps to 0, the rounded-up __boundary may wrap to 0 throughout.
- */
-
-#define pgd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-
-#ifndef p4d_addr_end
-#define p4d_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-#ifndef pud_addr_end
-#define pud_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-#ifndef pmd_addr_end
-#define pmd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-/*
- * When walking page tables, we usually want to skip any p?d_none entries;
- * and any p?d_bad entries - reporting the error before resetting to none.
- * Do the tests inline, but report and clear the bad entry in mm/memory.c.
- */
-void pgd_clear_bad(pgd_t *);
-
-#ifndef __PAGETABLE_P4D_FOLDED
-void p4d_clear_bad(p4d_t *);
-#else
-#define p4d_clear_bad(p4d) do { } while (0)
-#endif
-
-#ifndef __PAGETABLE_PUD_FOLDED
-void pud_clear_bad(pud_t *);
-#else
-#define pud_clear_bad(pud) do { } while (0)
-#endif
-
-void pmd_clear_bad(pmd_t *);
-
-static inline int pgd_none_or_clear_bad(pgd_t *pgd)
-{
- if (pgd_none(*pgd))
- return 1;
- if (unlikely(pgd_bad(*pgd))) {
- pgd_clear_bad(pgd);
- return 1;
- }
- return 0;
-}
-
-static inline int p4d_none_or_clear_bad(p4d_t *p4d)
-{
- if (p4d_none(*p4d))
- return 1;
- if (unlikely(p4d_bad(*p4d))) {
- p4d_clear_bad(p4d);
- return 1;
- }
- return 0;
-}
-
-static inline int pud_none_or_clear_bad(pud_t *pud)
-{
- if (pud_none(*pud))
- return 1;
- if (unlikely(pud_bad(*pud))) {
- pud_clear_bad(pud);
- return 1;
- }
- return 0;
-}
-
-static inline int pmd_none_or_clear_bad(pmd_t *pmd)
-{
- if (pmd_none(*pmd))
- return 1;
- if (unlikely(pmd_bad(*pmd))) {
- pmd_clear_bad(pmd);
- return 1;
- }
- return 0;
-}
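To show how the p?d_addr_end() macros and the p?d_none_or_clear_bad() checks above fit together, a condensed sketch of the canonical walking loop (modelled on the walkers in mm/, top level only; the descent into p4d/pud/pmd repeats the same pattern).

/* Sketch: visit each pgd slot covering [addr, end), skipping empty/bad entries. */
static void example_walk_pgd_range(struct mm_struct *mm,
				   unsigned long addr, unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* ... descend into p4d/pud/pmd here ... */
	} while (pgd++, addr = next, addr != end);
}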
-
-static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep)
-{
- /*
- * Get the current pte state, but zero it out to make it
- * non-present, preventing the hardware from asynchronously
- * updating it.
- */
- return ptep_get_and_clear(vma->vm_mm, addr, ptep);
-}
-
-static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- /*
- * The pte is non-present, so there's no hardware state to
- * preserve.
- */
- set_pte_at(vma->vm_mm, addr, ptep, pte);
-}
-
-#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-/*
- * Start a pte protection read-modify-write transaction, which
- * protects against asynchronous hardware modifications to the pte.
- * The intention is not to prevent the hardware from making pte
- * updates, but to prevent any updates it may make from being lost.
- *
- * This does not protect against other software modifications of the
- * pte; the appropriate pte lock must be held over the transaction.
- *
- * Note that this interface is intended to be batchable, meaning that
- * ptep_modify_prot_commit may not actually update the pte, but merely
- * queue the update to be done at some later time. The update must be
- * actually committed before the pte lock is released, however.
- */
-static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep)
-{
- return __ptep_modify_prot_start(vma, addr, ptep);
-}
-
-/*
- * Commit an update to a pte, leaving any hardware-controlled bits in
- * the PTE unmodified.
- */
-static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep, pte_t old_pte, pte_t pte)
-{
- __ptep_modify_prot_commit(vma, addr, ptep, pte);
-}
-#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
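A rough sketch of the intended call pattern (simplified; the pte lock is assumed to be held by the caller, as the comment above requires):

/* Sketch: write-protect one pte without losing concurrent hardware A/D updates. */
static void example_wrprotect_one(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep)
{
	pte_t old_pte = ptep_modify_prot_start(vma, addr, ptep);

	ptep_modify_prot_commit(vma, addr, ptep, old_pte,
				pte_wrprotect(old_pte));
}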
-#endif /* CONFIG_MMU */
-
-/*
- * No-op macros that just return the current protection value. Defined here
- * because these macros can be used even if CONFIG_MMU is not defined.
- */
-#ifndef pgprot_encrypted
-#define pgprot_encrypted(prot) (prot)
-#endif
-
-#ifndef pgprot_decrypted
-#define pgprot_decrypted(prot) (prot)
-#endif
-
-/*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could arise in the direct-mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads through
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode() do {} while (0)
-#define arch_leave_lazy_mmu_mode() do {} while (0)
-#define arch_flush_lazy_mmu_mode() do {} while (0)
-#endif
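A minimal usage sketch (illustrative only; per the comment above, the page table lock covering these ptes must be held across the lazy section):

/* Sketch: batch a run of pte updates so a hypervisor can flush them at once. */
static void example_wrprotect_range(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, int nr)
{
	int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < nr; i++, addr += PAGE_SIZE, ptep++)
		ptep_set_wrprotect(mm, addr, ptep);
	arch_leave_lazy_mmu_mode();	/* any batched updates are flushed here */
}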
-
-/*
- * A facility to provide batching of the reload of page tables and
- * other process state with the actual context switch code for
- * paravirtualized guests. By convention, only one of the batched
- * update (lazy) modes (CPU, MMU) should be active at any given time,
- * entry should never be nested, and entry and exits should always be
- * paired. This is for sanity of maintaining and reasoning about the
- * kernel code. In this case, the exit (end of the context switch) is
- * in architecture-specific code, and so doesn't need a generic
- * definition.
- */
-#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
-#define arch_start_context_switch(prev) do {} while (0)
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
-#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
-static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline int pmd_swp_soft_dirty(pmd_t pmd)
-{
- return 0;
-}
-
-static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-#endif
-#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
-static inline int pte_soft_dirty(pte_t pte)
-{
- return 0;
-}
-
-static inline int pmd_soft_dirty(pmd_t pmd)
-{
- return 0;
-}
-
-static inline pte_t pte_mksoft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline pte_t pte_clear_soft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline int pte_swp_soft_dirty(pte_t pte)
-{
- return 0;
-}
-
-static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline int pmd_swp_soft_dirty(pmd_t pmd)
-{
- return 0;
-}
-
-static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-#endif
-
-#ifndef __HAVE_PFNMAP_TRACKING
-/*
- * Interfaces that can be used by architecture code to keep track of
- * memory type of pfn mappings specified by the remap_pfn_range,
- * vmf_insert_pfn.
- */
-
-/*
- * track_pfn_remap is called when a _new_ pfn mapping is being established
- * by remap_pfn_range() for physical range indicated by pfn and size.
- */
-static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr,
- unsigned long size)
-{
- return 0;
-}
-
-/*
- * track_pfn_insert is called when a _new_ single pfn is established
- * by vmf_insert_pfn().
- */
-static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn)
-{
-}
-
-/*
- * track_pfn_copy is called when a vma covering the pfnmap gets
- * copied through copy_page_range().
- */
-static inline int track_pfn_copy(struct vm_area_struct *vma)
-{
- return 0;
-}
-
-/*
- * untrack_pfn is called while unmapping a pfnmap for a region.
- * untrack can be called for a specific region indicated by pfn and size or
- * can be for the entire vma (in which case pfn, size are zero).
- */
-static inline void untrack_pfn(struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size)
-{
-}
-
-/*
- * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
- */
-static inline void untrack_pfn_moved(struct vm_area_struct *vma)
-{
-}
-#else
-extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr,
- unsigned long size);
-extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn);
-extern int track_pfn_copy(struct vm_area_struct *vma);
-extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
- unsigned long size);
-extern void untrack_pfn_moved(struct vm_area_struct *vma);
-#endif
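Schematic illustration of the tracking contract only; the kernel's own remap/unmap paths are the real callers, so drivers do not open-code this. The point is simply that a tracked range is established with track_pfn_remap() and released with untrack_pfn().

/* Schematic: lifetime of a tracked pfn range (error handling trimmed). */
static int example_track_lifecycle(struct vm_area_struct *vma,
				   unsigned long pfn, unsigned long size)
{
	pgprot_t prot = vma->vm_page_prot;
	int err;

	err = track_pfn_remap(vma, &prot, pfn, vma->vm_start, size);
	if (err)
		return err;

	/* ... the mapping exists; prot may have been adjusted by the arch ... */

	untrack_pfn(vma, pfn, size);
	return 0;
}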
-
-#ifdef __HAVE_COLOR_ZERO_PAGE
-static inline int is_zero_pfn(unsigned long pfn)
-{
- extern unsigned long zero_pfn;
- unsigned long offset_from_zero_pfn = pfn - zero_pfn;
- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
-}
-
-#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
-
-#else
-static inline int is_zero_pfn(unsigned long pfn)
-{
- extern unsigned long zero_pfn;
- return pfn == zero_pfn;
-}
-
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
- extern unsigned long zero_pfn;
- return zero_pfn;
-}
-#endif
-
-#ifdef CONFIG_MMU
-
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmd_trans_huge(pmd_t pmd)
-{
- return 0;
-}
-#ifndef pmd_write
-static inline int pmd_write(pmd_t pmd)
-{
- BUG();
- return 0;
-}
-#endif /* pmd_write */
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
- BUG();
- return 0;
-}
-#endif /* pud_write */
-
-#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
-{
- return 0;
-}
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
-#endif
-
-#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
- (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
-static inline int pud_trans_huge(pud_t pud)
-{
- return 0;
-}
-#endif
-
-/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
-static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
-{
- pud_t pudval = READ_ONCE(*pud);
-
- if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
- return 1;
- if (unlikely(pud_bad(pudval))) {
- pud_clear_bad(pud);
- return 1;
- }
- return 0;
-}
-
-/* See pmd_trans_unstable for discussion. */
-static inline int pud_trans_unstable(pud_t *pud)
-{
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
- return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
-#else
- return 0;
-#endif
-}
-
-#ifndef pmd_read_atomic
-static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
-{
- /*
- * Depend on compiler for an atomic pmd read. NOTE: this is
- * only going to work, if the pmdval_t isn't larger than
- * an unsigned long.
- */
- return *pmdp;
-}
-#endif
-
-#ifndef arch_needs_pgtable_deposit
-#define arch_needs_pgtable_deposit() (false)
-#endif
-/*
- * This function is meant to be used by sites walking pagetables with
- * the mmap_sem held in read mode to protect against MADV_DONTNEED and
- * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
- * into a null pmd and the transhuge page fault can convert a null pmd
- * into a hugepmd or into a regular pmd (if the hugepage allocation
- * fails). While holding the mmap_sem in read mode the pmd becomes
- * stable and stops changing under us only if it's not null and not a
- * transhuge pmd. When those races occur and this function makes a
- * difference vs the standard pmd_none_or_clear_bad, the result is
- * undefined, so behaving as if the pmd were none is safe (because it
- * can return none anyway). The compiler level barrier() is critically
- * important to compute the two checks atomically on the same pmdval.
- *
- * For 32bit kernels with a 64bit large pmd_t this automatically takes
- * care of reading the pmd atomically to avoid SMP race conditions
- * against pmd_populate() when the mmap_sem is held for reading by the
- * caller (a special atomic read not done by "gcc" as in the generic
- * version above, is also needed when THP is disabled because the page
- * fault can populate the pmd from under us).
- */
-static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
-{
- pmd_t pmdval = pmd_read_atomic(pmd);
- /*
- * The barrier will stabilize the pmdval in a register or on
- * the stack so that it will stop changing under the code.
- *
- * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
- * pmd_read_atomic is allowed to return a not atomic pmdval
- * (for example pointing to an hugepage that has never been
- * mapped in the pmd). The below checks will only care about
- * the low part of the pmd with 32bit PAE x86 anyway, with the
- * exception of pmd_none(). So the important thing is that if
- * the low part of the pmd is found null, the high part will
- * be also null or the pmd_none() check below would be
- * confused.
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- barrier();
-#endif
- /*
- * !pmd_present() checks for pmd migration entries
- *
- * The complete check uses is_pmd_migration_entry() in linux/swapops.h
- * But using that requires moving this function and pmd_trans_unstable()
- * to linux/swapops.h to resolve the dependency, which is too much code movement.
- *
- * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
- * because !pmd_present() pages can only be under migration, not swapped
- * out.
- *
- * pmd_none() is preserved for future condition checks on pmd migration
- * entries and to avoid confusion with this function's name, although it is
- * redundant with !pmd_present().
- */
- if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
- (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
- return 1;
- if (unlikely(pmd_bad(pmdval))) {
- pmd_clear_bad(pmd);
- return 1;
- }
- return 0;
-}
-
-/*
- * This is a noop if Transparent Hugepage Support is not built into
- * the kernel. Otherwise it is equivalent to
- * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
- * places that already verified the pmd is not none and they want to
- * walk ptes while holding the mmap sem in read mode (write mode doesn't
- * need this). If THP is not enabled, the pmd can't go away under the
- * code even if MADV_DONTNEED runs, but if THP is enabled we need to
- * run a pmd_trans_unstable before walking the ptes after
- * split_huge_pmd returns (because it may have run when the pmd became
- * null, but then a page fault can map in a THP and not a regular page).
- */
-static inline int pmd_trans_unstable(pmd_t *pmd)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- return pmd_none_or_trans_huge_or_clear_bad(pmd);
-#else
- return 0;
-#endif
-}
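A condensed sketch of the usage the comment describes, based on the common pattern in mm/ walkers (locking simplified, error handling trimmed):

/* Sketch: only map the pte page once the pmd is known to be stable. */
static int example_walk_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				  unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte;

	if (pmd_trans_unstable(pmd))
		return 0;	/* treat like pmd_none(); the caller may retry */

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE, pte++) {
		/* ... examine *pte ... */
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}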
-
-#ifndef CONFIG_NUMA_BALANCING
-/*
- * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
- * the only case the kernel cares about is NUMA balancing, and it is only ever set
- * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
- * _PAGE_PROTNONE, so by default, implement the helper as "always no". It
- * is the responsibility of the caller to distinguish between PROT_NONE
- * protections and NUMA hinting fault protections.
- */
-static inline int pte_protnone(pte_t pte)
-{
- return 0;
-}
-
-static inline int pmd_protnone(pmd_t pmd)
-{
- return 0;
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
-#endif /* CONFIG_MMU */
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-
-#ifndef __PAGETABLE_P4D_FOLDED
-int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
-int p4d_clear_huge(p4d_t *p4d);
-#else
-static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int p4d_clear_huge(p4d_t *p4d)
-{
- return 0;
-}
-#endif /* !__PAGETABLE_P4D_FOLDED */
-
-int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
-int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
-int pud_clear_huge(pud_t *pud);
-int pmd_clear_huge(pmd_t *pmd);
-int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
-int pud_free_pmd_page(pud_t *pud, unsigned long addr);
-int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int p4d_clear_huge(p4d_t *p4d)
-{
- return 0;
-}
-static inline int pud_clear_huge(pud_t *pud)
-{
- return 0;
-}
-static inline int pmd_clear_huge(pmd_t *pmd)
-{
- return 0;
-}
-static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
-{
- return 0;
-}
-static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
-{
- return 0;
-}
-static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-
-#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * ARCHes with special requirements for evicting THP backing TLB entries can
- * implement this. It can also help optimize the normal TLB flush in the
- * THP regime. The stock flush_tlb_range() typically has an optimization to
- * nuke the entire TLB if the flush span is greater than a threshold, which
- * will likely be true for a single huge page. Thus a single THP flush will
- * invalidate the entire TLB, which is not desirable.
- * e.g. see arch/arc: flush_pmd_tlb_range
- */
-#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
-#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
-#else
-#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
-#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
-#endif
-#endif
-
-struct file;
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t *vma_prot);
-
-#ifndef CONFIG_X86_ESPFIX64
-static inline void init_espfix_bsp(void) { }
-#endif
-
-extern void __init pgtable_cache_init(void);
-
-#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
-static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
-{
- return true;
-}
-
-static inline bool arch_has_pfn_modify_check(void)
-{
- return false;
-}
-#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
-
-/*
- * Architecture PAGE_KERNEL_* fallbacks
- *
- * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
- * because they really don't support them, or the port needs to be updated to
- * reflect the required functionality. Below is a set of relatively safe
- * fallbacks, provided as a best effort, which we can rely on until the
- * architectures define them on their own.
- */
-
-#ifndef PAGE_KERNEL_RO
-# define PAGE_KERNEL_RO PAGE_KERNEL
-#endif
-
-#ifndef PAGE_KERNEL_EXEC
-# define PAGE_KERNEL_EXEC PAGE_KERNEL
-#endif
-
-#endif /* !__ASSEMBLY__ */
-
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
-#endif
-
-#ifndef has_transparent_hugepage
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define has_transparent_hugepage() 1
-#else
-#define has_transparent_hugepage() 0
-#endif
-#endif
-
-/*
- * On some architectures it depends on the mm whether the p4d/pud or pmd
- * layer of the page table hierarchy is folded or not.
- */
-#ifndef mm_p4d_folded
-#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
-#endif
-
-#ifndef mm_pud_folded
-#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
-#endif
-
-#ifndef mm_pmd_folded
-#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
-#endif
-
-/*
- * p?d_leaf() - true if this entry is a final mapping to a physical address.
- * This differs from p?d_huge() in that these are always available (if
- * the architecture supports large pages at the appropriate level) even
- * if CONFIG_HUGETLB_PAGE is not defined.
- * Only meaningful when called on a valid entry.
- */
-#ifndef pgd_leaf
-#define pgd_leaf(x) 0
-#endif
-#ifndef p4d_leaf
-#define p4d_leaf(x) 0
-#endif
-#ifndef pud_leaf
-#define pud_leaf(x) 0
-#endif
-#ifndef pmd_leaf
-#define pmd_leaf(x) 0
-#endif
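To show what the p?d_leaf() predicates are for, a sketch of a lookup that stops descending at the first leaf entry (none/bad checks omitted for brevity; illustrative only):

/* Sketch: return the size of the mapping level covering addr. */
static unsigned long example_mapping_size(struct mm_struct *mm,
					  unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_leaf(*pgd))
		return PGDIR_SIZE;
	p4d = p4d_offset(pgd, addr);
	if (p4d_leaf(*p4d))
		return P4D_SIZE;
	pud = pud_offset(p4d, addr);
	if (pud_leaf(*pud))
		return PUD_SIZE;
	pmd = pmd_offset(pud, addr);
	if (pmd_leaf(*pmd))
		return PMD_SIZE;
	return PAGE_SIZE;
}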
-
-#endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/asm-generic/pgtable_uffd.h b/include/asm-generic/pgtable_uffd.h
new file mode 100644
index 000000000000..828966d4c281
--- /dev/null
+++ b/include/asm-generic/pgtable_uffd.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_GENERIC_PGTABLE_UFFD_H
+#define _ASM_GENERIC_PGTABLE_UFFD_H
+
+#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+static __always_inline int pte_uffd_wp(pte_t pte)
+{
+ return 0;
+}
+
+static __always_inline int pmd_uffd_wp(pmd_t pmd)
+{
+ return 0;
+}
+
+static __always_inline pte_t pte_mkuffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static __always_inline pte_t pte_clear_uffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static __always_inline pte_t pte_swp_mkuffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline int pte_swp_uffd_wp(pte_t pte)
+{
+ return 0;
+}
+
+static __always_inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_uffd_wp(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
+
+#endif /* _ASM_GENERIC_PGTABLE_UFFD_H */
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d683f5e6d791..b4d43a4af5f7 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc)
} while (0)
#define init_idle_preempt_count(p, cpu) do { \
- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
static __always_inline void set_preempt_need_resched(void)
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 3aefde23dcea..75b8f4601b28 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -2,6 +2,10 @@
/*
* Queue read/write lock
*
+ * These use generic atomic and locking routines, but depend on a fair spinlock
+ * implementation in order to be fair themselves. The implementation in
+ * asm-generic/spinlock.h meets these requirements.
+ *
* (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
*
* Authors: Waiman Long <waiman.long@hp.com>
@@ -15,6 +19,8 @@
#include <asm-generic/qrwlock_types.h>
+/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */
+
/*
* Writer states & reader shift and bias.
*/
@@ -31,13 +37,13 @@ extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
/**
- * queued_read_trylock - try to acquire read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_trylock - try to acquire read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
static inline int queued_read_trylock(struct qrwlock *lock)
{
- u32 cnts;
+ int cnts;
cnts = atomic_read(&lock->cnts);
if (likely(!(cnts & _QW_WMASK))) {
@@ -50,13 +56,13 @@ static inline int queued_read_trylock(struct qrwlock *lock)
}
/**
- * queued_write_trylock - try to acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_trylock - try to acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
static inline int queued_write_trylock(struct qrwlock *lock)
{
- u32 cnts;
+ int cnts;
cnts = atomic_read(&lock->cnts);
if (unlikely(cnts))
@@ -66,12 +72,12 @@ static inline int queued_write_trylock(struct qrwlock *lock)
_QW_LOCKED));
}
/**
- * queued_read_lock - acquire read lock of a queue rwlock
- * @lock: Pointer to queue rwlock structure
+ * queued_read_lock - acquire read lock of a queued rwlock
+ * @lock: Pointer to queued rwlock structure
*/
static inline void queued_read_lock(struct qrwlock *lock)
{
- u32 cnts;
+ int cnts;
cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
if (likely(!(cnts & _QW_WMASK)))
@@ -82,12 +88,12 @@ static inline void queued_read_lock(struct qrwlock *lock)
}
/**
- * queued_write_lock - acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_lock - acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_write_lock(struct qrwlock *lock)
{
- u32 cnts = 0;
+ int cnts = 0;
/* Optimize for the unfair lock case where the fair flag is 0. */
if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
return;
@@ -96,8 +102,8 @@ static inline void queued_write_lock(struct qrwlock *lock)
}
/**
- * queued_read_unlock - release read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_unlock - release read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_read_unlock(struct qrwlock *lock)
{
@@ -108,23 +114,34 @@ static inline void queued_read_unlock(struct qrwlock *lock)
}
/**
- * queued_write_unlock - release write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_unlock - release write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_write_unlock(struct qrwlock *lock)
{
smp_store_release(&lock->wlocked, 0);
}
+/**
+ * queued_rwlock_is_contended - check if the lock is contended
+ * @lock : Pointer to queued rwlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static inline int queued_rwlock_is_contended(struct qrwlock *lock)
+{
+ return arch_spin_is_locked(&lock->wait_lock);
+}
+
/*
* Remapping rwlock architecture specific functions to the corresponding
- * queue rwlock functions.
+ * queued rwlock functions.
*/
-#define arch_read_lock(l) queued_read_lock(l)
-#define arch_write_lock(l) queued_write_lock(l)
-#define arch_read_trylock(l) queued_read_trylock(l)
-#define arch_write_trylock(l) queued_write_trylock(l)
-#define arch_read_unlock(l) queued_read_unlock(l)
-#define arch_write_unlock(l) queued_write_unlock(l)
+#define arch_read_lock(l) queued_read_lock(l)
+#define arch_write_lock(l) queued_write_lock(l)
+#define arch_read_trylock(l) queued_read_trylock(l)
+#define arch_write_trylock(l) queued_write_trylock(l)
+#define arch_read_unlock(l) queued_read_unlock(l)
+#define arch_write_unlock(l) queued_write_unlock(l)
+#define arch_rwlock_is_contended(l) queued_rwlock_is_contended(l)
#endif /* __ASM_GENERIC_QRWLOCK_H */
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index c36f1d5a2572..12392c14c4d0 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -7,7 +7,7 @@
#include <asm/spinlock_types.h>
/*
- * The queue read/write lock data structure
+ * The queued read/write lock data structure
*/
typedef struct qrwlock {
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index fde943d180e0..995513fa2690 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -2,6 +2,35 @@
/*
* Queued spinlock
*
+ * A 'generic' spinlock implementation that is based on MCS locks. For an
+ * architecture that's looking for a 'generic' spinlock, please first consider
+ * ticket-lock.h and only come looking here when you've considered all the
+ * constraints below and can show your hardware does actually perform better
+ * with qspinlock.
+ *
+ * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
+ * weaker than RCtso if you're power), where regular code only expects atomic_t
+ * to be RCpc.
+ *
+ * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
+ * of atomic operations to behave well together, please audit them carefully to
+ * ensure they all have forward progress. Many atomic operations may default to
+ * cmpxchg() loops which will not have good forward progress properties on
+ * LL/SC architectures.
+ *
+ * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
+ * do. Carefully read the patches that introduced
+ * queued_fetch_set_pending_acquire().
+ *
+ * qspinlock also heavily relies on mixed size atomic operations, in specific
+ * it requires architectures to have xchg16; something which many LL/SC
+ * architectures need to implement as a 32bit and+or in order to satisfy the
+ * forward progress guarantees mentioned above.
+ *
+ * Further reading on mixed size atomics that might be relevant:
+ *
+ * http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
+ *
* (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
* (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
*
@@ -11,7 +40,9 @@
#define __ASM_GENERIC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
+#include <linux/atomic.h>
+#ifndef queued_spin_is_locked
/**
* queued_spin_is_locked - is the spinlock locked?
* @lock: Pointer to queued spinlock structure
@@ -25,6 +56,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
*/
return atomic_read(&lock->val);
}
+#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -57,7 +89,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
*/
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
- u32 val = atomic_read(&lock->val);
+ int val = atomic_read(&lock->val);
if (unlikely(val))
return 0;
@@ -67,19 +99,21 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#ifndef queued_spin_lock
/**
* queued_spin_lock - acquire a queued spinlock
* @lock: Pointer to queued spinlock structure
*/
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
- u32 val = 0;
+ int val = 0;
if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
return;
queued_spin_lock_slowpath(lock, val);
}
+#endif
#ifndef queued_spin_unlock
/**
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 56d1309d32f8..2fd1fb89ec36 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -9,15 +9,7 @@
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H
-/*
- * Including atomic.h with PARAVIRT on will cause compilation errors because
- * of recursive header file inclusion via paravirt_types.h. So don't include
- * it if PARAVIRT is on.
- */
-#ifndef CONFIG_PARAVIRT
#include <linux/types.h>
-#include <linux/atomic.h>
-#endif
typedef struct qspinlock {
union {
diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h
new file mode 100644
index 000000000000..8d0a6280e982
--- /dev/null
+++ b/include/asm-generic/rwonce.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
+ *
+ * These two macros will also work on aggregate data types like structs or
+ * unions.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+#ifndef __ASM_GENERIC_RWONCE_H
+#define __ASM_GENERIC_RWONCE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler_types.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t) \
+ compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
+ "Unsupported access size for {READ,WRITE}_ONCE().")
+
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity. Note that this may result in tears!
+ */
+#ifndef __READ_ONCE
+#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
+#endif
+
+#define READ_ONCE(x) \
+({ \
+ compiletime_assert_rwonce_type(x); \
+ __READ_ONCE(x); \
+})
+
+#define __WRITE_ONCE(x, val) \
+do { \
+ *(volatile typeof(x) *)&(x) = (val); \
+} while (0)
+
+#define WRITE_ONCE(x, val) \
+do { \
+ compiletime_assert_rwonce_type(x); \
+ __WRITE_ONCE(x, val); \
+} while (0)
+
+static __no_sanitize_or_inline
+unsigned long __read_once_word_nocheck(const void *addr)
+{
+ return __READ_ONCE(*(unsigned long *)addr);
+}
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
+ * word from memory atomically but without telling KASAN/KCSAN. This is
+ * usually used by unwinding code when walking the stack of a running process.
+ */
+#define READ_ONCE_NOCHECK(x) \
+({ \
+ compiletime_assert(sizeof(x) == sizeof(unsigned long), \
+ "Unsupported access size for READ_ONCE_NOCHECK()."); \
+ (typeof(x))__read_once_word_nocheck(&(x)); \
+})
+
+static __no_kasan_or_inline
+unsigned long read_word_at_a_time(const void *addr)
+{
+ kasan_check_read(addr, 1);
+ return *(unsigned long *)addr;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_RWONCE_H */
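A small usage sketch of the first use case named in the header comment (process-level code communicating with an interrupt handler on the same CPU); illustrative only.

/* Sketch: a flag set from an IRQ handler and polled from process context. */
static int example_done;

static void example_irq_handler(void)
{
	WRITE_ONCE(example_done, 1);		/* never torn or elided */
}

static void example_wait_for_irq(void)
{
	while (!READ_ONCE(example_done))	/* re-read on every iteration */
		cpu_relax();
}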
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index 1321ac7821d7..6b6f42bc58f9 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -33,7 +33,7 @@ static inline const int *get_compat_mode1_syscalls(void)
static const int mode1_syscalls_32[] = {
__NR_seccomp_read_32, __NR_seccomp_write_32,
__NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
- 0, /* null terminated */
+ -1, /* negative terminated */
};
return mode1_syscalls_32;
}
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d1779d442aa5..db13bb620f52 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -53,43 +53,29 @@ extern char __ctors_start[], __ctors_end[];
/* Start and end of .opd section - used for function descriptors. */
extern char __start_opd[], __end_opd[];
+/* Start and end of instrumentation protected text section */
+extern char __noinstr_text_start[], __noinstr_text_end[];
+
extern __visible const void __nosave_begin, __nosave_end;
/* Function descriptor handling (if any). Override in asm/sections.h */
-#ifndef dereference_function_descriptor
-#define dereference_function_descriptor(p) (p)
-#define dereference_kernel_function_descriptor(p) (p)
-#endif
-
-/* random extra sections (if any). Override
- * in asm/sections.h */
-#ifndef arch_is_kernel_text
-static inline int arch_is_kernel_text(unsigned long addr)
-{
- return 0;
-}
-#endif
-
-#ifndef arch_is_kernel_data
-static inline int arch_is_kernel_data(unsigned long addr)
-{
- return 0;
-}
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
+void *dereference_function_descriptor(void *ptr);
+void *dereference_kernel_function_descriptor(void *ptr);
+#else
+#define dereference_function_descriptor(p) ((void *)(p))
+#define dereference_kernel_function_descriptor(p) ((void *)(p))
+
+/* An address is simply the address of the function. */
+typedef struct {
+ unsigned long addr;
+} func_desc_t;
#endif
-/*
- * Check if an address is part of freed initmem. This is needed on architectures
- * with virt == phys kernel mapping, for code that wants to check if an address
- * is part of a static object within [_stext, _end]. After initmem is freed,
- * memory can be allocated from it, and such allocations would then have
- * addresses within the range [_stext, _end].
- */
-#ifndef arch_is_kernel_initmem_freed
-static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+static inline bool have_function_descriptors(void)
{
- return 0;
+ return IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS);
}
-#endif
/**
* memory_contains - checks if an object is contained within a memory region
@@ -111,7 +97,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
/**
* memory_intersects - checks if the region occupied by an object intersects
* with another memory region
- * @begin: virtual address of the beginning of the memory regien
+ * @begin: virtual address of the beginning of the memory region
* @end: virtual address of the end of the memory region
* @virt: virtual address of the memory object
* @size: size of the memory object
@@ -124,7 +110,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
{
void *vend = virt + size;
- return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+ if (virt < end && vend > begin)
+ return true;
+
+ return false;
}
/**
@@ -156,6 +145,28 @@ static inline bool init_section_intersects(void *virt, size_t size)
}
/**
+ * is_kernel_core_data - checks if the pointer address is located in the
+ * .data or .bss section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .data or .bss, false otherwise.
+ * Note: On some archs it may return true for core RODATA, and false
+ * for others, but it will always be true for core RW data.
+ */
+static inline bool is_kernel_core_data(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
+ return true;
+
+ if (addr >= (unsigned long)__bss_start &&
+ addr < (unsigned long)__bss_stop)
+ return true;
+
+ return false;
+}
+
+/**
* is_kernel_rodata - checks if the pointer address is located in the
* .rodata section
*
@@ -169,4 +180,51 @@ static inline bool is_kernel_rodata(unsigned long addr)
addr < (unsigned long)__end_rodata;
}
+/**
+ * is_kernel_inittext - checks if the pointer address is located in the
+ * .init.text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .init.text, false otherwise.
+ */
+static inline bool is_kernel_inittext(unsigned long addr)
+{
+ return addr >= (unsigned long)_sinittext &&
+ addr < (unsigned long)_einittext;
+}
+
+/**
+ * __is_kernel_text - checks if the pointer address is located in the
+ * .text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .text, false otherwise.
+ * Note: an internal helper; it only checks the range _stext to _etext.
+ */
+static inline bool __is_kernel_text(unsigned long addr)
+{
+ return addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_etext;
+}
+
+/**
+ * __is_kernel - checks if the pointer address is located in the kernel range
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in the kernel range, false otherwise.
+ * Note: an internal helper; it checks the range _stext to _end,
+ * and the range from __init_begin to __init_end, which can be outside
+ * of the _stext to _end range.
+ */
+static inline bool __is_kernel(unsigned long addr)
+{
+ return ((addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_end) ||
+ (addr >= (unsigned long)__init_begin &&
+ addr < (unsigned long)__init_end));
+}
+
#endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
index c53984fa9761..663dd6d0795d 100644
--- a/include/asm-generic/signal.h
+++ b/include/asm-generic/signal.h
@@ -5,8 +5,6 @@
#include <uapi/asm-generic/signal.h>
#ifndef __ASSEMBLY__
-#ifdef SA_RESTORER
-#endif
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h
new file mode 100644
index 000000000000..2a67aed9ac52
--- /dev/null
+++ b/include/asm-generic/softirq_stack.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
+#define __ASM_GENERIC_SOFTIRQ_STACK_H
+
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+void do_softirq_own_stack(void);
+#else
+static inline void do_softirq_own_stack(void)
+{
+ __do_softirq();
+}
+#endif
+
+#endif
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index adaf6acab172..fdfebcb050f4 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -1,12 +1,92 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_SPINLOCK_H
-#define __ASM_GENERIC_SPINLOCK_H
+
/*
- * You need to implement asm/spinlock.h for SMP support. The generic
- * version does not handle SMP.
+ * 'Generic' ticket-lock implementation.
+ *
+ * It relies on atomic_fetch_add() having well defined forward progress
+ * guarantees under contention. If your architecture cannot provide this, stick
+ * to a test-and-set lock.
+ *
+ * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
+ * sub-word of the value. This is generally true for anything LL/SC although
+ * you'd be hard pressed to find anything useful in architecture specifications
+ * about this. If your architecture cannot do this you might be better off with
+ * a test-and-set.
+ *
+ * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
+ * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
+ * a full fence after the spin to upgrade the otherwise-RCpc
+ * atomic_cond_read_acquire().
+ *
+ * The implementation uses smp_cond_load_acquire() to spin, so if the
+ * architecture has WFE-like instructions to sleep instead of polling for word
+ * modifications, be sure to implement that (see ARM64 for example).
+ *
*/
-#ifdef CONFIG_SMP
-#error need an architecture specific asm/spinlock.h
-#endif
+
+#ifndef __ASM_GENERIC_SPINLOCK_H
+#define __ASM_GENERIC_SPINLOCK_H
+
+#include <linux/atomic.h>
+#include <asm-generic/spinlock_types.h>
+
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ u32 val = atomic_fetch_add(1<<16, lock);
+ u16 ticket = val >> 16;
+
+ if (ticket == (u16)val)
+ return;
+
+ /*
+ * atomic_cond_read_acquire() is RCpc, but rather than defining a
+ * custom cond_read_rcsc() here we just emit a full fence. We only
+ * need the prior reads before subsequent writes ordering from
+ * smp_mb(), but as atomic_cond_read_acquire() just emits reads and we
+ * have no outstanding writes due to the atomic_fetch_add() the extra
+ * orderings are free.
+ */
+ atomic_cond_read_acquire(lock, ticket == (u16)VAL);
+ smp_mb();
+}
+
+static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
+{
+ u32 old = atomic_read(lock);
+
+ if ((old >> 16) != (old & 0xffff))
+ return false;
+
+ return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
+}
+
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+ u32 val = atomic_read(lock);
+
+ smp_store_release(ptr, (u16)val + 1);
+}
+
+static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ u32 val = atomic_read(lock);
+
+ return ((val >> 16) != (val & 0xffff));
+}
+
+static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ u32 val = atomic_read(lock);
+
+ return (s16)((val >> 16) - (val & 0xffff)) > 1;
+}
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return !arch_spin_is_locked(&lock);
+}
+
+#include <asm/qrwlock.h>
#endif /* __ASM_GENERIC_SPINLOCK_H */
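A worked example of the encoding used above (values are illustrative): the high halfword is the "next ticket" counter bumped by arch_spin_lock(), the low halfword is the "now serving" value bumped by arch_spin_unlock().

/*
 * lock value 0x00040002 (hypothetical):
 *   next ticket = 0x0004, now serving = 0x0002
 *   -> the CPU holding ticket 2 owns the lock, ticket 3 is spinning
 *   arch_spin_is_locked():    0x0004 != 0x0002       -> true
 *   arch_spin_is_contended(): (0x0004 - 0x0002) > 1  -> true
 *   arch_spin_unlock() stores 0x0003 to the low halfword, letting ticket 3 in.
 */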
diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h
new file mode 100644
index 000000000000..8962bb730945
--- /dev/null
+++ b/include/asm-generic/spinlock_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_SPINLOCK_TYPES_H
+#define __ASM_GENERIC_SPINLOCK_TYPES_H
+
+#include <linux/types.h>
+typedef atomic_t arch_spinlock_t;
+
+/*
+ * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the
+ * include.
+ */
+#include <asm/qrwlock_types.h>
+
+#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0)
+
+#endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index f3135e734387..5a80fe728dc8 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -43,9 +43,9 @@ int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
* @regs: task_pt_regs() of @task
*
* It's only valid to call this when @task is stopped for system
- * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT),
- * after tracehook_report_syscall_entry() returned nonzero to prevent
- * the system call from taking place.
+ * call exit tracing (due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT), after ptrace_report_syscall_entry()
+ * returned nonzero to prevent the system call from taking place.
*
* This rolls back the register state in @regs so it's as if the
* system call instruction was a no-op. The registers containing
@@ -63,7 +63,8 @@ void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
* Returns 0 if the system call succeeded, or -ERRORCODE if it failed.
*
* It's only valid to call this when @task is stopped for tracing on exit
- * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
@@ -76,7 +77,8 @@ long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
* This value is meaningless if syscall_get_error() returned nonzero.
*
* It's only valid to call this when @task is stopped for tracing on exit
- * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
@@ -93,7 +95,8 @@ long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
* code; the user sees a failed system call with this errno code.
*
* It's only valid to call this when @task is stopped for tracing on exit
- * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
int error, long val);
@@ -108,34 +111,21 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
* @args[0], and so on.
*
* It's only valid to call this when @task is stopped for tracing on
- * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned long *args);
/**
- * syscall_set_arguments - change system call parameter value
- * @task: task of interest, must be in system call entry tracing
- * @regs: task_pt_regs() of @task
- * @args: array of argument values to store
- *
- * Changes 6 arguments to the system call.
- * The first argument gets value @args[0], and so on.
- *
- * It's only valid to call this when @task is stopped for tracing on
- * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- */
-void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
- const unsigned long *args);
-
-/**
* syscall_get_arch - return the AUDIT_ARCH for the current system call
* @task: task of interest, must be blocked
*
* Returns the AUDIT_ARCH_* based on the system call convention in use.
*
* It's only valid to call this when @task is stopped on entry to a system
- * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
+ * call, due to %SYSCALL_WORK_SYSCALL_TRACE, %SYSCALL_WORK_SYSCALL_AUDIT, or
+ * %SYSCALL_WORK_SECCOMP.
*
* Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
* provide an implementation of this.
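The kernel-doc updates above only rename the stop reasons (SYSCALL_WORK_* instead of the old TIF_* flags); the accessors themselves are unchanged. A hedged sketch of how an in-kernel hook might combine them while the target task is stopped for syscall-exit tracing follows; report_syscall_exit() and the pr_info() reporting are invented for illustration, while the syscall_*() and task_pt_regs() calls are the API documented here, valid only under the stop conditions spelled out above.

#include <linux/sched.h>
#include <linux/printk.h>
#include <asm/syscall.h>

/* Hypothetical: report the syscall a stopped task is returning from. */
static void report_syscall_exit(struct task_struct *task)
{
        struct pt_regs *regs = task_pt_regs(task);

        pr_info("task %d: syscall %d returned %ld (error %ld)\n",
                task_pid_nr(task), syscall_get_nr(task, regs),
                syscall_get_return_value(task, regs),
                syscall_get_error(task, regs));
}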
diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h
deleted file mode 100644
index 59c5a3bd4a6e..000000000000
--- a/include/asm-generic/termios-base.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* termios.h: generic termios/termio user copying/translation
- */
-
-#ifndef _ASM_GENERIC_TERMIOS_BASE_H
-#define _ASM_GENERIC_TERMIOS_BASE_H
-
-#include <linux/uaccess.h>
-
-#ifndef __ARCH_TERMIO_GETPUT
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- struct termio __user *termio)
-{
- unsigned short tmp;
-
- if (get_user(tmp, &termio->c_iflag) < 0)
- goto fault;
- termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
-
- if (get_user(tmp, &termio->c_oflag) < 0)
- goto fault;
- termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
-
- if (get_user(tmp, &termio->c_cflag) < 0)
- goto fault;
- termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
-
- if (get_user(tmp, &termio->c_lflag) < 0)
- goto fault;
- termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
-
- if (get_user(termios->c_line, &termio->c_line) < 0)
- goto fault;
-
- if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
- goto fault;
-
- return 0;
-
- fault:
- return -EFAULT;
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
- put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
- put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
- put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
- put_user(termios->c_line, &termio->c_line) < 0 ||
- copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
- return -EFAULT;
-
- return 0;
-}
-
-#ifndef user_termios_to_kernel_termios
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#endif
-
-#ifndef kernel_termios_to_user_termios
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
-#endif
-
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* __ARCH_TERMIO_GETPUT */
-
-#endif /* _ASM_GENERIC_TERMIOS_BASE_H */
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
deleted file mode 100644
index b1398d0d4a1d..000000000000
--- a/include/asm-generic/termios.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_TERMIOS_H
-#define _ASM_GENERIC_TERMIOS_H
-
-
-#include <linux/uaccess.h>
-#include <uapi/asm-generic/termios.h>
-
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- const struct termio __user *termio)
-{
- unsigned short tmp;
-
- if (get_user(tmp, &termio->c_iflag) < 0)
- goto fault;
- termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
-
- if (get_user(tmp, &termio->c_oflag) < 0)
- goto fault;
- termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
-
- if (get_user(tmp, &termio->c_cflag) < 0)
- goto fault;
- termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
-
- if (get_user(tmp, &termio->c_lflag) < 0)
- goto fault;
- termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
-
- if (get_user(termios->c_line, &termio->c_line) < 0)
- goto fault;
-
- if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
- goto fault;
-
- return 0;
-
- fault:
- return -EFAULT;
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
- put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
- put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
- put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
- put_user(termios->c_line, &termio->c_line) < 0 ||
- copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
- return -EFAULT;
-
- return 0;
-}
-
-#ifdef TCGETS2
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios2 __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios2));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios2));
-}
-
-static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-#else /* TCGETS2 */
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-#endif /* TCGETS2 */
-
-#endif /* _ASM_GENERIC_TERMIOS_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f391f6b500b4..492dce43236e 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -13,7 +13,7 @@
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
-#include <asm/pgalloc.h>
+#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -46,7 +46,9 @@
*
* The mmu_gather API consists of:
*
- * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
+ * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
+ *
+ * start and finish a mmu_gather
*
* Finish in particular will issue a (final) TLB invalidate and free
* all (remaining) queued pages.
@@ -91,7 +93,7 @@
*
* - mmu_gather::fullmm
*
- * A flag set by tlb_gather_mmu() to indicate we're going to free
+ * A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
* the entire mm; this allows a number of optimizations.
*
* - We can ignore tlb_{start,end}_vma(); because we don't
@@ -156,9 +158,24 @@
* Useful if your architecture doesn't use IPIs for remote TLB invalidates
* and therefore doesn't naturally serialize with software page-table walkers.
*
+ * MMU_GATHER_NO_FLUSH_CACHE
+ *
+ * Indicates the architecture has flush_cache_range() but it need *NOT* be called
+ * before unmapping a VMA.
+ *
+ * NOTE: strictly speaking we shouldn't have this knob and instead rely on
+ * flush_cache_range() being a NOP, except Sparc64 seems to be
+ * different here.
+ *
+ * MMU_GATHER_MERGE_VMAS
+ *
+ * Indicates the architecture wants to merge ranges over VMAs; typically when
+ * multiple range invalidates are more expensive than a full invalidate.
+ *
* MMU_GATHER_NO_RANGE
*
- * Use this if your architecture lacks an efficient flush_tlb_range().
+ * Use this if your architecture lacks an efficient flush_tlb_range(). This
+ * option implies MMU_GATHER_MERGE_VMAS above.
*
* MMU_GATHER_NO_GATHER
*
@@ -178,7 +195,7 @@ struct mmu_table_batch {
struct rcu_head rcu;
#endif
unsigned int nr;
- void *tables[0];
+ void *tables[];
};
#define MAX_TABLE_BATCH \
@@ -225,7 +242,7 @@ struct mmu_gather_batch {
struct mmu_gather_batch *next;
unsigned int nr;
unsigned int max;
- struct page *pages[0];
+ struct page *pages[];
};
#define MAX_GATHER_BATCH \
@@ -286,6 +303,7 @@ struct mmu_gather {
*/
unsigned int vma_exec : 1;
unsigned int vma_huge : 1;
+ unsigned int vma_pfn : 1;
unsigned int batch_count;
@@ -332,8 +350,8 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
#ifdef CONFIG_MMU_GATHER_NO_RANGE
-#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
-#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#if defined(tlb_flush)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif
/*
@@ -350,20 +368,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_mm(tlb->mm);
}
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#define tlb_end_vma tlb_end_vma
-static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
#else /* CONFIG_MMU_GATHER_NO_RANGE */
#ifndef tlb_flush
-
-#if defined(tlb_start_vma) || defined(tlb_end_vma)
-#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
-#endif
-
/*
* When an architecture does not provide its own tlb_flush() implementation
* but does have a reasonably efficient flush_vma_range() implementation
@@ -383,6 +390,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_range(&vma, tlb->start, tlb->end);
}
}
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
@@ -398,19 +408,11 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
* We rely on tlb_end_vma() to issue a flush, such that when we reset
* these values the batch is empty.
*/
- tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
+ tlb->vma_huge = is_vm_hugetlb_page(vma);
tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+ tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}
-#else
-
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#endif
-
-#endif /* CONFIG_MMU_GATHER_NO_RANGE */
-
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
/*
@@ -484,32 +486,68 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
* case where we're doing a full MM flush. When we're doing a munmap,
* the vmas are adjusted to only cover the region to be torn down.
*/
-#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
tlb_update_vma_flags(tlb, vma);
+#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
flush_cache_range(vma, vma->vm_start, vma->vm_end);
-}
#endif
+}
-#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
/*
- * Do a TLB flush and reset the range at VMA boundaries; this avoids
- * the ranges growing with the unused space between consecutive VMAs,
- * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
- * this.
+ * VM_PFNMAP is more fragile because the core mm will not track the
+ * page mapcount -- there might not be page-frames for these PFNs after
+ * all. Force flush TLBs for such ranges to avoid munmap() vs
+ * unmap_mapping_range() races.
*/
- tlb_flush_mmu_tlbonly(tlb);
+ if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
+ /*
+ * Do a TLB flush and reset the range at VMA boundaries; this avoids
+ * the ranges growing with the unused space between consecutive VMAs.
+ */
+ tlb_flush_mmu_tlbonly(tlb);
+ }
+}
+
+/*
+ * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
+ * and set corresponding cleared_*.
+ */
+static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_ptes = 1;
+}
+
+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_pmds = 1;
+}
+
+static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_puds = 1;
+}
+
+static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_p4ds = 1;
}
-#endif
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
@@ -524,19 +562,21 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
*/
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
- tlb->cleared_ptes = 1; \
+ tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
do { \
unsigned long _sz = huge_page_size(h); \
- __tlb_adjust_range(tlb, address, _sz); \
- if (_sz == PMD_SIZE) \
- tlb->cleared_pmds = 1; \
- else if (_sz == PUD_SIZE) \
- tlb->cleared_puds = 1; \
+ if (_sz >= P4D_SIZE) \
+ tlb_flush_p4d_range(tlb, address, _sz); \
+ else if (_sz >= PUD_SIZE) \
+ tlb_flush_pud_range(tlb, address, _sz); \
+ else if (_sz >= PMD_SIZE) \
+ tlb_flush_pmd_range(tlb, address, _sz); \
+ else \
+ tlb_flush_pte_range(tlb, address, _sz); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
@@ -550,8 +590,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
- tlb->cleared_pmds = 1; \
+ tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
@@ -565,8 +604,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
- tlb->cleared_puds = 1; \
+ tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
} while (0)
@@ -591,9 +629,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_pmds = 1; \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#endif
@@ -601,9 +638,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_puds = 1; \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
#endif
@@ -611,9 +647,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_p4ds = 1; \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
@@ -627,6 +662,20 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
} while (0)
#endif
+#ifndef pte_needs_flush
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+ return true;
+}
+#endif
+
+#ifndef huge_pmd_needs_flush
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+ return true;
+}
+#endif
+
#endif /* CONFIG_MMU */
#endif /* _ASM_GENERIC__TLB_H */
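The mmu_gather machinery above is driven by the core mm when it tears down mappings; architectures mostly just provide tlb_flush() and the Kconfig knobs. A condensed, hedged sketch of the calling pattern over a single VMA follows: the page-table walk is elided and unmap_one_vma() is an invented name, while the tlb_*() calls and the tlb_gather_mmu()/tlb_finish_mmu() signatures follow this kernel series.

#include <asm/tlb.h>    /* pulls in asm-generic/tlb.h on most architectures */

/* Illustrative only: batch TLB invalidation while unmapping one VMA. */
static void unmap_one_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm);  /* start a gather for this mm */
        tlb_start_vma(&tlb, vma);  /* records vma_exec/vma_huge/vma_pfn, flushes caches */

        /*
         * ... walk the page tables; for each present pte at addr:
         *      tlb_remove_tlb_entry(&tlb, pte, addr);
         *      tlb_remove_page(&tlb, page);
         */

        tlb_end_vma(&tlb, vma);    /* may flush at the VMA boundary (see above) */
        tlb_finish_mmu(&tlb);      /* final TLB flush and free of queued pages  */
}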
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 238873739550..4dbe715be65b 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -45,10 +45,10 @@
#endif
#ifndef cpumask_of_node
- #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #ifdef CONFIG_NUMA
#define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
#else
- #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
#endif
#endif
#ifndef pcibus_to_node
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index e935318804f8..a5be9e61a2a2 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -8,100 +8,92 @@
* address space, e.g. all NOMMU machines.
*/
#include <linux/string.h>
+#include <asm-generic/access_ok.h>
#ifdef CONFIG_UACCESS_MEMCPY
-static inline __must_check unsigned long
-raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+#include <asm/unaligned.h>
+
+static __always_inline int
+__get_user_fn(size_t size, const void __user *from, void *to)
{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 *)to = *(u8 __force *)from;
- return 0;
- case 2:
- *(u16 *)to = *(u16 __force *)from;
- return 0;
- case 4:
- *(u32 *)to = *(u32 __force *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 *)to = *(u64 __force *)from;
- return 0;
-#endif
- }
+ BUILD_BUG_ON(!__builtin_constant_p(size));
+
+ switch (size) {
+ case 1:
+ *(u8 *)to = *((u8 __force *)from);
+ return 0;
+ case 2:
+ *(u16 *)to = get_unaligned((u16 __force *)from);
+ return 0;
+ case 4:
+ *(u32 *)to = get_unaligned((u32 __force *)from);
+ return 0;
+ case 8:
+ *(u64 *)to = get_unaligned((u64 __force *)from);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
}
- memcpy(to, (const void __force *)from, n);
- return 0;
}
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
-static inline __must_check unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+static __always_inline int
+__put_user_fn(size_t size, void __user *to, void *from)
{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 __force *)to = *(u8 *)from;
- return 0;
- case 2:
- *(u16 __force *)to = *(u16 *)from;
- return 0;
- case 4:
- *(u32 __force *)to = *(u32 *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 __force *)to = *(u64 *)from;
- return 0;
-#endif
- default:
- break;
- }
- }
+ BUILD_BUG_ON(!__builtin_constant_p(size));
- memcpy((void __force *)to, from, n);
- return 0;
+ switch (size) {
+ case 1:
+ *(u8 __force *)to = *(u8 *)from;
+ return 0;
+ case 2:
+ put_unaligned(*(u16 *)from, (u16 __force *)to);
+ return 0;
+ case 4:
+ put_unaligned(*(u32 *)from, (u32 __force *)to);
+ return 0;
+ case 8:
+ put_unaligned(*(u64 *)from, (u64 __force *)to);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
+ }
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-#endif /* CONFIG_UACCESS_MEMCPY */
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ *((type *)dst) = get_unaligned((type *)(src)); \
+ if (0) /* make sure the label looks used to the compiler */ \
+ goto err_label; \
+} while (0)
-#ifndef KERNEL_DS
-#define KERNEL_DS MAKE_MM_SEG(~0UL)
-#endif
-
-#ifndef USER_DS
-#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
-#endif
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ put_unaligned(*((type *)src), (type *)(dst)); \
+ if (0) /* make sure the label looks used to the compiler */ \
+ goto err_label; \
+} while (0)
-#ifndef get_fs
-#define get_fs() (current_thread_info()->addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
- current_thread_info()->addr_limit = fs;
+ memcpy(to, (const void __force *)from, n);
+ return 0;
}
-#endif
-
-#ifndef segment_eq
-#define segment_eq(a, b) ((a).seg == (b).seg)
-#endif
-
-#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
-/*
- * The architecture should really override this if possible, at least
- * doing a check on the get_fs()
- */
-#ifndef __access_ok
-static inline int __access_ok(unsigned long addr, unsigned long size)
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- return 1;
+ memcpy((void __force *)to, from, n);
+ return 0;
}
-#endif
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+#endif /* CONFIG_UACCESS_MEMCPY */
/*
* These are the main single-value transfer routines. They automatically
@@ -213,50 +205,6 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
extern int __get_user_bad(void) __attribute__((noreturn));
/*
- * Copy a null terminated string from userspace.
- */
-#ifndef __strncpy_from_user
-static inline long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
- char *tmp;
- strncpy(dst, (const char __force *)src, count);
- for (tmp = dst; *tmp && count > 0; tmp++, count--)
- ;
- return (tmp - dst);
-}
-#endif
-
-static inline long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- if (!access_ok(src, 1))
- return -EFAULT;
- return __strncpy_from_user(dst, src, count);
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-#ifndef __strnlen_user
-#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
-#endif
-
-/*
- * Unlike strnlen, strnlen_user includes the nul terminator in
- * its returned count. Callers should check for a returned value
- * greater than N as an indication the string is too long.
- */
-static inline long strnlen_user(const char __user *src, long n)
-{
- if (!access_ok(src, 1))
- return 0;
- return __strnlen_user(src, n);
-}
-
-/*
* Zero Userspace
*/
#ifndef __clear_user
@@ -280,4 +228,8 @@ clear_user(void __user *to, unsigned long n)
#include <asm/extable.h>
+__must_check long strncpy_from_user(char *dst, const char __user *src,
+ long count);
+__must_check long strnlen_user(const char __user *src, long n);
+
#endif /* __ASM_GENERIC_UACCESS_H */
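On the CONFIG_UACCESS_MEMCPY path the "user" address space is just kernel memory (NOMMU), so get_user()/put_user() reduce to the fixed-size, possibly unaligned accesses defined above. A short, hedged usage sketch follows; example_copy() and its argument layout are invented, while the uaccess calls themselves are the standard API this header implements.

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical: read a u32 count from userspace and write back a status byte. */
static int example_copy(void __user *ubuf)
{
        u32 count;
        u8 status;

        if (get_user(count, (u32 __user *)ubuf))   /* size 4 -> __get_user_fn(4, ...) */
                return -EFAULT;

        status = count > 64 ? 1 : 0;

        if (put_user(status, (u8 __user *)ubuf))   /* size 1 -> __put_user_fn(1, ...) */
                return -EFAULT;

        return 0;
}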
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 374c940e9be1..699650f81970 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -6,31 +6,150 @@
* This is the most generic implementation of unaligned accesses
* and should work almost anywhere.
*/
+#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>
-/* Set by the arch if it can handle unaligned accesses in hardware. */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/access_ok.h>
-#endif
-
-#if defined(__LITTLE_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return le16_to_cpu(__get_unaligned_t(__le16, p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return le32_to_cpu(__get_unaligned_t(__le32, p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return le64_to_cpu(__get_unaligned_t(__le64, p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_t(__le16, cpu_to_le16(val), p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_t(__le32, cpu_to_le32(val), p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_t(__le64, cpu_to_le64(val), p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return be16_to_cpu(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return be32_to_cpu(__get_unaligned_t(__be32, p));
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return be64_to_cpu(__get_unaligned_t(__be64, p));
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_t(__be16, cpu_to_be16(val), p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_t(__be32, cpu_to_be32(val), p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_t(__be64, cpu_to_be64(val), p);
+}
+
+static inline u32 __get_unaligned_be24(const u8 *p)
+{
+ return p[0] << 16 | p[1] << 8 | p[2];
+}
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+ return __get_unaligned_be24(p);
+}
+
+static inline u32 __get_unaligned_le24(const u8 *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16;
+}
+
+static inline u32 get_unaligned_le24(const void *p)
+{
+ return __get_unaligned_le24(p);
+}
+
+static inline void __put_unaligned_be24(const u32 val, u8 *p)
+{
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be24(const u32 val, void *p)
+{
+ __put_unaligned_be24(val, p);
+}
+
+static inline void __put_unaligned_le24(const u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline void put_unaligned_le24(const u32 val, void *p)
+{
+ __put_unaligned_le24(val, p);
+}
+
+static inline void __put_unaligned_be48(const u64 val, u8 *p)
+{
+ *p++ = val >> 40;
+ *p++ = val >> 32;
+ *p++ = val >> 24;
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be48(const u64 val, void *p)
+{
+ __put_unaligned_be48(val, p);
+}
+
+static inline u64 __get_unaligned_be48(const u8 *p)
+{
+ return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
+ p[3] << 16 | p[4] << 8 | p[5];
+}
+
+static inline u64 get_unaligned_be48(const void *p)
+{
+ return __get_unaligned_be48(p);
+}
#endif /* __ASM_GENERIC_UNALIGNED_H */
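The __packed-struct helpers above give byte-order-aware loads and stores that are safe at any alignment. A small hedged example decoding and encoding a hypothetical wire header follows; struct wire_hdr and its field layout are invented, while the get_/put_unaligned_*() calls are the API defined above.

#include <asm/unaligned.h>
#include <linux/types.h>

/* Hypothetical 7-byte header: u32 BE magic, u16 LE length, u8 flags. */
struct wire_hdr {
        u32 magic;
        u16 len;
        u8  flags;
};

static void parse_wire_hdr(const u8 *buf, struct wire_hdr *hdr)
{
        hdr->magic = get_unaligned_be32(buf);      /* bytes 0-3, big endian    */
        hdr->len   = get_unaligned_le16(buf + 4);  /* bytes 4-5, little endian */
        hdr->flags = buf[6];                       /* single byte, no helper   */
}

static void build_wire_hdr(u8 *buf, const struct wire_hdr *hdr)
{
        put_unaligned_be32(hdr->magic, buf);
        put_unaligned_le16(hdr->len, buf + 4);
        buf[6] = hdr->flags;
}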
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index cec543d9e87b..c835607f78ae 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -11,20 +11,6 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
}
#endif /* __arch_get_k_vdso_data */
-#ifndef __arch_update_vdso_data
-static __always_inline bool __arch_update_vdso_data(void)
-{
- return true;
-}
-#endif /* __arch_update_vdso_data */
-
-#ifndef __arch_get_clock_mode
-static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
-{
- return 0;
-}
-#endif /* __arch_get_clock_mode */
-
#ifndef __arch_update_vsyscall
static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
struct timekeeper *tk)
diff --git a/include/asm-generic/vermagic.h b/include/asm-generic/vermagic.h
new file mode 100644
index 000000000000..084274a1219e
--- /dev/null
+++ b/include/asm-generic/vermagic.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_GENERIC_VERMAGIC_H
+#define _ASM_GENERIC_VERMAGIC_H
+
+#define MODULE_ARCH_VERMAGIC ""
+
+#endif /* _ASM_GENERIC_VERMAGIC_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e00f41aa8ec4..3dc5824141cd 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -34,6 +34,7 @@
*
* STABS_DEBUG
* DWARF_DEBUG
+ * ELF_DETAILS
*
* DISCARDS // must be the last
* }
@@ -89,15 +90,18 @@
* .data. We don't want to pull in .data..other sections, which Linux
* has defined. Same for text and bss.
*
+ * With LTO_CLANG, the linker also splits sections by default, so we need
+ * these macros to combine the sections during the final link.
+ *
* RODATA_MAIN is not used because existing code already defines .rodata.x
* sections to be brought in with rodata.
*/
-#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
-#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
-#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
@@ -109,12 +113,27 @@
#endif
/*
- * Align to a 32 byte boundary equal to the
- * alignment gcc 4.5 uses for a struct
+ * GCC 4.5 and later use a 32-byte section alignment for structures,
+ * except GCC 4.9, which feels the need to align on 64 bytes.
*/
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
+/*
+ * The order of the sched class addresses is important, as it is used
+ * to determine the priority ordering of each sched class relative
+ * to the others.
+ */
+#define SCHED_DATA \
+ STRUCT_ALIGN(); \
+ __sched_class_highest = .; \
+ *(__stop_sched_class) \
+ *(__dl_sched_class) \
+ *(__rt_sched_class) \
+ *(__fair_sched_class) \
+ *(__idle_sched_class) \
+ __sched_class_lowest = .;
+
/* The actual configuration determine if the init/exit sections
* are handled as text/data or they can be discarded (which
* often happens at runtime)
@@ -135,6 +154,24 @@
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
+#define KEEP_PATCHABLE KEEP(*(__patchable_function_entries))
+#define PATCHABLE_DISCARDS
+#else
+#define KEEP_PATCHABLE
+#define PATCHABLE_DISCARDS *(__patchable_function_entries)
+#endif
+
+#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
+/*
+ * Simply points to ftrace_stub, but with the proper protocol.
+ * Defined by the linker script in linux/vmlinux.lds.h
+ */
+#define FTRACE_STUB_HACK ftrace_stub_graph = ftrace_stub;
+#else
+#define FTRACE_STUB_HACK
+#endif
+
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
* The ftrace call sites are logged to a section whose name depends on the
@@ -142,19 +179,21 @@
* FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
* dependencies for FTRACE_CALLSITE_SECTION's definition.
*
- * Need to also make ftrace_stub_graph point to ftrace_stub
- * so that the same stub location may have different protocols
- * and not mess up with C verifiers.
+ * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func,
+ * as some archs have a different prototype for that function while
+ * ftrace_ops_list_func() itself has a single prototype.
*/
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
- KEEP(*(__patchable_function_entries)) \
+ KEEP_PATCHABLE \
__stop_mcount_loc = .; \
- ftrace_stub_graph = ftrace_stub;
+ FTRACE_STUB_HACK \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
#else
# ifdef CONFIG_FUNCTION_TRACER
-# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub;
+# define MCOUNT_REC() FTRACE_STUB_HACK \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
# else
# define MCOUNT_REC()
# endif
@@ -308,6 +347,7 @@
#define DATA_DATA \
*(.xiptext) \
*(DATA_MAIN) \
+ *(.data..decrypted) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data*) \
@@ -320,9 +360,12 @@
*(__tracepoints) \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
- __start___verbose = .; \
- KEEP(*(__verbose)) \
- __stop___verbose = .; \
+ __start___dyndbg_classes = .; \
+ KEEP(*(__dyndbg_classes)) \
+ __stop___dyndbg_classes = .; \
+ __start___dyndbg = .; \
+ KEEP(*(__dyndbg)) \
+ __stop___dyndbg = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
@@ -341,7 +384,8 @@
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
- *(.data..page_aligned)
+ *(.data..page_aligned) \
+ . = ALIGN(page_align);
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
@@ -368,19 +412,50 @@
KEEP(*(__jump_table)) \
__stop___jump_table = .;
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+#define STATIC_CALL_DATA \
+ . = ALIGN(8); \
+ __start_static_call_sites = .; \
+ KEEP(*(.static_call_sites)) \
+ __stop_static_call_sites = .; \
+ __start_static_call_tramp_key = .; \
+ KEEP(*(.static_call_tramp_key)) \
+ __stop_static_call_tramp_key = .;
+#else
+#define STATIC_CALL_DATA
+#endif
+
/*
* Allow architectures to handle ro_after_init data on their
* own by defining an empty RO_AFTER_INIT_DATA.
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
+ . = ALIGN(8); \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
JUMP_TABLE_DATA \
+ STATIC_CALL_DATA \
__end_ro_after_init = .;
#endif
/*
+ * .kcfi_traps contains a list of KCFI trap locations.
+ */
+#ifndef KCFI_TRAPS
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+#define KCFI_TRAPS \
+ __kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) { \
+ __start___kcfi_traps = .; \
+ KEEP(*(.kcfi_traps)) \
+ __stop___kcfi_traps = .; \
+ }
+#else
+#define KCFI_TRAPS
+#endif
+#endif
+
+/*
* Read only Data
*/
#define RO_DATA(align) \
@@ -388,6 +463,7 @@
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
*(.rodata) *(.rodata.*) \
+ SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
__start___tracepoints_ptrs = .; \
@@ -428,15 +504,11 @@
__end_pci_fixups_suspend_late = .; \
} \
\
- /* Built-in firmware blobs */ \
- .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
- __start_builtin_fw = .; \
- KEEP(*(.builtin_fw)) \
- __end_builtin_fw = .; \
- } \
- \
+ FW_LOADER_BUILT_IN_DATA \
TRACEDATA \
\
+ PRINTK_INDEX \
+ \
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
__start___ksymtab = .; \
@@ -451,27 +523,6 @@
__stop___ksymtab_gpl = .; \
} \
\
- /* Kernel symbol table: Normal unused symbols */ \
- __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
- __start___ksymtab_unused = .; \
- KEEP(*(SORT(___ksymtab_unused+*))) \
- __stop___ksymtab_unused = .; \
- } \
- \
- /* Kernel symbol table: GPL-only unused symbols */ \
- __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
- __start___ksymtab_unused_gpl = .; \
- KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
- __stop___ksymtab_unused_gpl = .; \
- } \
- \
- /* Kernel symbol table: GPL-future-only symbols */ \
- __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
- __start___ksymtab_gpl_future = .; \
- KEEP(*(SORT(___ksymtab_gpl_future+*))) \
- __stop___ksymtab_gpl_future = .; \
- } \
- \
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
__start___kcrctab = .; \
@@ -486,27 +537,6 @@
__stop___kcrctab_gpl = .; \
} \
\
- /* Kernel symbol table: Normal unused symbols */ \
- __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
- __start___kcrctab_unused = .; \
- KEEP(*(SORT(___kcrctab_unused+*))) \
- __stop___kcrctab_unused = .; \
- } \
- \
- /* Kernel symbol table: GPL-only unused symbols */ \
- __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
- __start___kcrctab_unused_gpl = .; \
- KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
- __stop___kcrctab_unused_gpl = .; \
- } \
- \
- /* Kernel symbol table: GPL-future-only symbols */ \
- __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
- __start___kcrctab_gpl_future = .; \
- KEEP(*(SORT(___kcrctab_gpl_future+*))) \
- __stop___kcrctab_gpl_future = .; \
- } \
- \
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
*(__ksymtab_strings) \
@@ -533,12 +563,25 @@
__stop___modver = .; \
} \
\
+ KCFI_TRAPS \
+ \
RO_EXCEPTION_TABLE \
NOTES \
+ BTF \
\
. = ALIGN((align)); \
__end_rodata = .;
+
+/*
+ * Non-instrumentable text section
+ */
+#define NOINSTR_TEXT \
+ ALIGN_FUNCTION(); \
+ __noinstr_text_start = .; \
+ *(.noinstr.text) \
+ __noinstr_text_end = .;
+
/*
* .text section. Map to function alignment to avoid address changes
* during second ld run in second ld pass when generating System.map
@@ -549,9 +592,14 @@
*/
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.text.hot .text.hot.*) \
+ *(TEXT_MAIN .text.fixup) \
+ *(.text.unlikely .text.unlikely.*) \
+ *(.text.unknown .text.unknown.*) \
+ NOINSTR_TEXT \
*(.text..refcount) \
*(.ref.text) \
+ *(.text.asan.* .text.tsan.*) \
MEM_KEEP(init.text*) \
MEM_KEEP(exit.text*) \
@@ -602,6 +650,12 @@
*(.softirqentry.text) \
__softirqentry_text_end = .;
+#define STATIC_CALL_TEXT \
+ ALIGN_FUNCTION(); \
+ __static_call_text_start = .; \
+ *(.static_call.text) \
+ __static_call_text_end = .;
+
/* Section used for early init (in .S files) */
#define HEAD_TEXT KEEP(*(.head.text))
@@ -622,6 +676,24 @@
}
/*
+ * .BTF
+ */
+#ifdef CONFIG_DEBUG_INFO_BTF
+#define BTF \
+ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
+ __start_BTF = .; \
+ KEEP(*(.BTF)) \
+ __stop_BTF = .; \
+ } \
+ . = ALIGN(4); \
+ .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
+ *(.BTF_ids) \
+ }
+#else
+#define BTF
+#endif
+
+/*
* Init task
*/
#define INIT_TASK_DATA_SECTION(align) \
@@ -633,6 +705,7 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
__ctors_start = .; \
+ KEEP(*(SORT(.ctors.*))) \
KEEP(*(.ctors)) \
KEEP(*(SORT(.init_array.*))) \
KEEP(*(.init_array)) \
@@ -666,7 +739,8 @@
THERMAL_TABLE(governor) \
EARLYCON_TABLE() \
LSM_TABLE() \
- EARLY_LSM_TABLE()
+ EARLY_LSM_TABLE() \
+ KUNIT_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
@@ -712,7 +786,9 @@
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
BSS_FIRST_SECTIONS \
+ . = ALIGN(PAGE_SIZE); \
*(.bss..page_aligned) \
+ . = ALIGN(PAGE_SIZE); \
*(.dynbss) \
*(BSS_MAIN) \
*(COMMON) \
@@ -756,18 +832,29 @@
/* DWARF 4 */ \
.debug_types 0 : { *(.debug_types) } \
/* DWARF 5 */ \
+ .debug_addr 0 : { *(.debug_addr) } \
+ .debug_line_str 0 : { *(.debug_line_str) } \
+ .debug_loclists 0 : { *(.debug_loclists) } \
.debug_macro 0 : { *(.debug_macro) } \
- .debug_addr 0 : { *(.debug_addr) }
+ .debug_names 0 : { *(.debug_names) } \
+ .debug_rnglists 0 : { *(.debug_rnglists) } \
+ .debug_str_offsets 0 : { *(.debug_str_offsets) }
- /* Stabs debugging sections. */
+/* Stabs debugging sections. */
#define STABS_DEBUG \
.stab 0 : { *(.stab) } \
.stabstr 0 : { *(.stabstr) } \
.stab.excl 0 : { *(.stab.excl) } \
.stab.exclstr 0 : { *(.stab.exclstr) } \
.stab.index 0 : { *(.stab.index) } \
- .stab.indexstr 0 : { *(.stab.indexstr) } \
- .comment 0 : { *(.comment) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+
+/* Required sections not related to debugging. */
+#define ELF_DETAILS \
+ .comment 0 : { *(.comment) } \
+ .symtab 0 : { *(.symtab) } \
+ .strtab 0 : { *(.strtab) } \
+ .shstrtab 0 : { *(.shstrtab) }
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
@@ -795,10 +882,11 @@
KEEP(*(.orc_unwind)) \
__stop_orc_unwind = .; \
} \
+ text_size = _etext - _stext; \
. = ALIGN(4); \
.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
orc_lookup = .; \
- . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
+ . += (((text_size + LOOKUP_BLOCK_SIZE - 1) / \
LOOKUP_BLOCK_SIZE) + 1) * 4; \
orc_lookup_end = .; \
}
@@ -806,6 +894,18 @@
#define ORC_UNWIND_TABLE
#endif
+/* Built-in firmware blobs */
+#ifdef CONFIG_FW_LOADER
+#define FW_LOADER_BUILT_IN_DATA \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
+ __start_builtin_fw = .; \
+ KEEP(*(.builtin_fw)) \
+ __end_builtin_fw = .; \
+ }
+#else
+#define FW_LOADER_BUILT_IN_DATA
+#endif
+
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
. = ALIGN(4); \
@@ -818,6 +918,17 @@
#define TRACEDATA
#endif
+#ifdef CONFIG_PRINTK_INDEX
+#define PRINTK_INDEX \
+ .printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \
+ __start_printk_index = .; \
+ *(.printk_index) \
+ __stop_printk_index = .; \
+ }
+#else
+#define PRINTK_INDEX
+#endif
+
#define NOTES \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
__start_notes = .; \
@@ -856,6 +967,13 @@
KEEP(*(.con_initcall.init)) \
__con_initcall_end = .;
+/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
+#define KUNIT_TABLE() \
+ . = ALIGN(8); \
+ __kunit_suites_start = .; \
+ KEEP(*(.kunit_test_suites)) \
+ __kunit_suites_end = .;
+
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
@@ -894,14 +1012,48 @@
* section definitions so that such archs put those in earlier section
* definitions.
*/
-#define DISCARDS \
- /DISCARD/ : { \
+#ifdef RUNTIME_DISCARD_EXIT
+#define EXIT_DISCARDS
+#else
+#define EXIT_DISCARDS \
EXIT_TEXT \
- EXIT_DATA \
- EXIT_CALL \
+ EXIT_DATA
+#endif
+
+/*
+ * Clang's -fprofile-arcs, -fsanitize=kernel-address, and
+ * -fsanitize=thread produce unwanted sections (.eh_frame
+ * and .init_array.*), but CONFIG_CONSTRUCTORS wants to
+ * keep any .init_array.* sections.
+ * https://bugs.llvm.org/show_bug.cgi?id=46478
+ */
+#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
+# ifdef CONFIG_CONSTRUCTORS
+# define SANITIZER_DISCARDS \
+ *(.eh_frame)
+# else
+# define SANITIZER_DISCARDS \
+ *(.init_array) *(.init_array.*) \
+ *(.eh_frame)
+# endif
+#else
+# define SANITIZER_DISCARDS
+#endif
+
+#define COMMON_DISCARDS \
+ SANITIZER_DISCARDS \
+ PATCHABLE_DISCARDS \
*(.discard) \
*(.discard.*) \
*(.modinfo) \
+ /* ld.bfd warns about .gnu.version* even when not emitted */ \
+ *(.gnu.version*) \
+
+#define DISCARDS \
+ /DISCARD/ : { \
+ EXIT_DISCARDS \
+ EXIT_CALL \
+ COMMON_DISCARDS \
}
/**
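Many of the macros above (MCOUNT_REC, KCFI_TRAPS, KUNIT_TABLE, the __dyndbg tables, ...) follow one pattern: entries are emitted into a named section and the linker script brackets that section with __start_*/__stop_* symbols so C code can walk the result as an array at runtime. A generic, hedged sketch of that pattern with an invented section and entry type follows; my_table, MY_ENTRY and my_table_init are illustrative, and real tables differ in entry layout and in whether the bounds come from explicit symbol assignments like those above or from the linker's automatic __start_<sec>/__stop_<sec> symbols.

#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/printk.h>

struct my_entry {
        const char *name;
        int value;
};

/* Emit an entry into the "my_table" section; __used keeps it from being discarded. */
#define MY_ENTRY(n, v)                                                  \
        static const struct my_entry __used __section("my_table")      \
        __my_entry_##n = { .name = #n, .value = (v) }

MY_ENTRY(foo, 1);
MY_ENTRY(bar, 2);

/* Bounds provided by the linker, as with __start___dyndbg above. */
extern const struct my_entry __start_my_table[];
extern const struct my_entry __stop_my_table[];

static int __init my_table_init(void)
{
        const struct my_entry *e;

        for (e = __start_my_table; e < __stop_my_table; e++)
                pr_info("%s = %d\n", e->name, e->value);
        return 0;
}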
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
index b62a2a56a4d4..44509d48fca2 100644
--- a/include/asm-generic/xor.h
+++ b/include/asm-generic/xor.h
@@ -8,7 +8,8 @@
#include <linux/prefetch.h>
static void
-xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -27,8 +28,9 @@ xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -48,8 +50,10 @@ xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_8regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -70,8 +74,11 @@ xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_8regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -93,7 +100,8 @@ xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -129,8 +137,9 @@ xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_32regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -175,8 +184,10 @@ xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_32regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -230,8 +241,11 @@ xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_32regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -294,7 +308,8 @@ xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
@@ -320,8 +335,9 @@ xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
@@ -350,8 +366,10 @@ xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -384,8 +402,11 @@ xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -421,7 +442,8 @@ xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -466,8 +488,9 @@ xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -523,8 +546,10 @@ xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -591,8 +616,11 @@ xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;