-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu | 17
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 3
-rw-r--r--  Documentation/arm64/booting.rst | 37
-rw-r--r--  Documentation/arm64/memory-tagging-extension.rst | 48
-rw-r--r--  arch/arm64/Makefile | 7
-rw-r--r--  arch/arm64/boot/Makefile | 8
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 11
-rw-r--r--  arch/arm64/include/asm/el2_setup.h | 13
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 1
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h | 1
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h | 4
-rw-r--r--  arch/arm64/include/asm/memory.h | 2
-rw-r--r--  arch/arm64/include/asm/mmu.h | 29
-rw-r--r--  arch/arm64/include/asm/mte-kasan.h | 17
-rw-r--r--  arch/arm64/include/asm/mte.h | 6
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 2
-rw-r--r--  arch/arm64/include/asm/pointer_auth.h | 12
-rw-r--r--  arch/arm64/include/asm/processor.h | 10
-rw-r--r--  arch/arm64/include/asm/simd.h | 2
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 47
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 13
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 2
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 7
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S | 13
-rw-r--r--  arch/arm64/kernel/entry.S | 66
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 14
-rw-r--r--  arch/arm64/kernel/head.S | 11
-rw-r--r--  arch/arm64/kernel/idreg-override.c | 2
-rw-r--r--  arch/arm64/kernel/mte.c | 180
-rw-r--r--  arch/arm64/kernel/perf_event.c | 2
-rw-r--r--  arch/arm64/kernel/pointer_auth.c | 10
-rw-r--r--  arch/arm64/kernel/process.c | 31
-rw-r--r--  arch/arm64/kernel/ptrace.c | 5
-rw-r--r--  arch/arm64/kernel/signal.c | 5
-rw-r--r--  arch/arm64/kernel/signal32.c | 6
-rw-r--r--  arch/arm64/kernel/suspend.c | 1
-rw-r--r--  arch/arm64/lib/insn.c | 4
-rw-r--r--  arch/arm64/mm/fault.c | 15
-rw-r--r--  arch/arm64/mm/proc.S | 3
-rw-r--r--  include/linux/kasan-tags.h | 15
-rw-r--r--  include/uapi/linux/prctl.h | 11
-rw-r--r--  lib/test_kasan.c | 2
-rw-r--r--  mm/kasan/hw_tags.c | 8
-rw-r--r--  mm/kasan/kasan.h | 22
44 files changed, 427 insertions(+), 298 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 160b10c029c0..edb19b31d710 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -640,3 +640,20 @@ Description: SPURR ticks for cpuX when it was idle.
This sysfs interface exposes the number of SPURR ticks
for cpuX when it was idle.
+
+What: /sys/devices/system/cpu/cpuX/mte_tcf_preferred
+Date: July 2021
+Contact: Linux ARM Kernel Mailing list <linux-arm-kernel@lists.infradead.org>
+Description: Preferred MTE tag checking mode
+
+ When a user program specifies more than one MTE tag checking
+ mode, this sysfs node is used to specify which mode should
+ be preferred when scheduling a task on that CPU. Possible
+ values:
+
+ ================ ==============================================
+ "sync" Prefer synchronous mode
+ "async" Prefer asynchronous mode
+ ================ ==============================================
+
+ See also: Documentation/arm64/memory-tagging-extension.rst
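
Illustration (not part of the patch): one minimal way a privileged process
could exercise the new sysfs node from C, using only the path and the
"sync"/"async" strings documented above; everything else (error handling,
the choice of cpu0) is an assumption for the sketch.

/*
 * Read CPU 0's preferred MTE tag check mode, then switch it to "sync".
 * Requires root. Illustrative only; not kernel code.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu0/mte_tcf_preferred";
	char cur[16] = { 0 };
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, cur, sizeof(cur) - 1) > 0)
		printf("current preference: %s", cur);	/* "sync" or "async" */
	if (pwrite(fd, "sync", 4, 0) != 4)		/* write at offset 0 */
		perror("pwrite");
	close(fd);
	return 0;
}

The default preference is "async", matching the per-CPU initialisation done
in register_mte_tcf_preferred_sysctl() further down in this series.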
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index bdb22006f713..6f257e39d89e 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -380,6 +380,9 @@
arm64.nopauth [ARM64] Unconditionally disable Pointer Authentication
support
+ arm64.nomte [ARM64] Unconditionally disable Memory Tagging Extension
+ support
+
ataflop= [HW,M68k]
atarimouse= [HW,MOUSE] Atari Mouse
diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst
index a9192e7a231b..3f9d86557c5e 100644
--- a/Documentation/arm64/booting.rst
+++ b/Documentation/arm64/booting.rst
@@ -207,10 +207,17 @@ Before jumping into the kernel, the following conditions must be met:
software at a higher exception level to prevent execution in an UNKNOWN
state.
- - SCR_EL3.FIQ must have the same value across all CPUs the kernel is
- executing on.
- - The value of SCR_EL3.FIQ must be the same as the one present at boot
- time whenever the kernel is executing.
+ For all systems:
+ - If EL3 is present:
+
+ - SCR_EL3.FIQ must have the same value across all CPUs the kernel is
+ executing on.
+ - The value of SCR_EL3.FIQ must be the same as the one present at boot
+ time whenever the kernel is executing.
+
+ - If EL3 is present and the kernel is entered at EL2:
+
+ - SCR_EL3.HCE (bit 8) must be initialised to 0b1.
For systems with a GICv3 interrupt controller to be used in v3 mode:
- If EL3 is present:
@@ -311,6 +318,28 @@ Before jumping into the kernel, the following conditions must be met:
- ZCR_EL2.LEN must be initialised to the same value for all CPUs the
kernel will execute on.
+ For CPUs with the Scalable Matrix Extension (FEAT_SME):
+
+ - If EL3 is present:
+
+ - CPTR_EL3.ESM (bit 12) must be initialised to 0b1.
+
+ - SCR_EL3.EnTP2 (bit 41) must be initialised to 0b1.
+
+ - SMCR_EL3.LEN must be initialised to the same value for all CPUs the
+ kernel will execute on.
+
+ - If the kernel is entered at EL1 and EL2 is present:
+
+ - CPTR_EL2.TSM (bit 12) must be initialised to 0b0.
+
+ - CPTR_EL2.SMEN (bits 25:24) must be initialised to 0b11.
+
+ - SCTLR_EL2.EnTP2 (bit 60) must be initialised to 0b1.
+
+ - SMCR_EL2.LEN must be initialised to the same value for all CPUs the
+ kernel will execute on.
+
The requirements described above for CPU mode, caches, MMUs, architected
timers, coherency and system registers apply to all CPUs. All CPUs must
enter the kernel in the same exception level. Where the values documented
diff --git a/Documentation/arm64/memory-tagging-extension.rst b/Documentation/arm64/memory-tagging-extension.rst
index b540178a93f8..7b99c8f428eb 100644
--- a/Documentation/arm64/memory-tagging-extension.rst
+++ b/Documentation/arm64/memory-tagging-extension.rst
@@ -77,14 +77,20 @@ configurable behaviours:
address is unknown).
The user can select the above modes, per thread, using the
-``prctl(PR_SET_TAGGED_ADDR_CTRL, flags, 0, 0, 0)`` system call where
-``flags`` contain one of the following values in the ``PR_MTE_TCF_MASK``
+``prctl(PR_SET_TAGGED_ADDR_CTRL, flags, 0, 0, 0)`` system call where ``flags``
+contains any number of the following values in the ``PR_MTE_TCF_MASK``
bit-field:
-- ``PR_MTE_TCF_NONE`` - *Ignore* tag check faults
+- ``PR_MTE_TCF_NONE``  - *Ignore* tag check faults
+ (ignored if combined with other options)
- ``PR_MTE_TCF_SYNC`` - *Synchronous* tag check fault mode
- ``PR_MTE_TCF_ASYNC`` - *Asynchronous* tag check fault mode
+If no modes are specified, tag check faults are ignored. If a single
+mode is specified, the program will run in that mode. If multiple
+modes are specified, the mode is selected as described in the "Per-CPU
+preferred tag checking modes" section below.
+
The current tag check fault mode can be read using the
``prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)`` system call.
@@ -120,13 +126,39 @@ in the ``PR_MTE_TAG_MASK`` bit-field.
interface provides an include mask. An include mask of ``0`` (exclusion
mask ``0xffff``) results in the CPU always generating tag ``0``.
+Per-CPU preferred tag checking mode
+-----------------------------------
+
+On some CPUs the performance of MTE in stricter tag checking modes
+is similar to that of less strict tag checking modes. This makes it
+worthwhile to enable stricter checks on those CPUs when a less strict
+checking mode is requested, in order to gain the error detection
+benefits of the stricter checks without the performance downsides. To
+support this scenario, a privileged user may configure a stricter
+tag checking mode as the CPU's preferred tag checking mode.
+
+The preferred tag checking mode for each CPU is controlled by
+``/sys/devices/system/cpu/cpu<N>/mte_tcf_preferred``, to which a
+privileged user may write the value ``async`` or ``sync``. The default
+preferred mode for each CPU is ``async``.
+
+To allow a program to potentially run in the CPU's preferred tag
+checking mode, the user program may set multiple tag check fault mode
+bits in the ``flags`` argument to the ``prctl(PR_SET_TAGGED_ADDR_CTRL,
+flags, 0, 0, 0)`` system call. If the CPU's preferred tag checking
+mode is in the task's set of provided tag checking modes (this will
+always be the case at present because the kernel only supports two
+tag checking modes, but future kernels may support more modes), that
+mode will be selected. Otherwise, one of the modes in the task's mode
+set will be selected in a currently unspecified manner.
+
Initial process state
---------------------
On ``execve()``, the new process has the following configuration:
- ``PR_TAGGED_ADDR_ENABLE`` set to 0 (disabled)
-- Tag checking mode set to ``PR_MTE_TCF_NONE``
+- No tag checking modes are selected (tag check faults ignored)
- ``PR_MTE_TAG_MASK`` set to 0 (all tags excluded)
- ``PSTATE.TCO`` set to 0
- ``PROT_MTE`` not set on any of the initial memory maps
@@ -251,11 +283,13 @@ Example of correct usage
return EXIT_FAILURE;
/*
- * Enable the tagged address ABI, synchronous MTE tag check faults and
- * allow all non-zero tags in the randomly generated set.
+ * Enable the tagged address ABI, synchronous or asynchronous MTE
+ * tag check faults (based on per-CPU preference) and allow all
+ * non-zero tags in the randomly generated set.
*/
if (prctl(PR_SET_TAGGED_ADDR_CTRL,
- PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | (0xfffe << PR_MTE_TAG_SHIFT),
+ PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
+ (0xfffe << PR_MTE_TAG_SHIFT),
0, 0, 0)) {
perror("prctl() failed");
return EXIT_FAILURE;
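
Illustration (not part of the patch): the documentation example above sets
both tag check fault modes; the sketch below shows the complementary read
side, decoding the result of PR_GET_TAGGED_ADDR_CTRL. The fallback constant
values are taken from the include/uapi/linux/prctl.h hunk later in this
diff; the rest is assumed for the example.

/* Query which MTE tag check fault modes the current task has requested. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL	56
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SYNC		(1UL << 1)
#define PR_MTE_TCF_ASYNC	(1UL << 2)
#endif

int main(void)
{
	long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);

	if (ctrl < 0) {
		perror("prctl(PR_GET_TAGGED_ADDR_CTRL)");
		return 1;
	}
	printf("sync requested:  %s\n", (ctrl & PR_MTE_TCF_SYNC) ? "yes" : "no");
	printf("async requested: %s\n", (ctrl & PR_MTE_TCF_ASYNC) ? "yes" : "no");
	/* Neither bit set: tag check faults are ignored. */
	return 0;
}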
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 7bc37d0a1b68..ded13230f901 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -165,8 +165,11 @@ Image: vmlinux
Image.%: Image
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-zinstall install:
- $(Q)$(MAKE) $(build)=$(boot) $@
+install: install-image := Image
+zinstall: install-image := Image.gz
+install zinstall:
+ $(CONFIG_SHELL) $(srctree)/$(boot)/install.sh $(KERNELRELEASE) \
+ $(boot)/$(install-image) System.map "$(INSTALL_PATH)"
PHONY += vdso_install
vdso_install:
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index cd3414898d10..ebe80faab883 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -35,11 +35,3 @@ $(obj)/Image.lzma: $(obj)/Image FORCE
$(obj)/Image.lzo: $(obj)/Image FORCE
$(call if_changed,lzo)
-
-install:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
- $(obj)/Image System.map "$(INSTALL_PATH)"
-
-zinstall:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
- $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9bb9d11750d7..cdfa2a242e9f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -552,7 +552,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
u64 mask = GENMASK_ULL(field + 3, field);
/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
- if (val == 0xf)
+ if (val == ID_AA64DFR0_PMUVER_IMP_DEF)
val = 0;
if (val > cap) {
@@ -657,7 +657,8 @@ static inline bool system_supports_4kb_granule(void)
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN4_SHIFT);
- return val == ID_AA64MMFR0_TGRAN4_SUPPORTED;
+ return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
+ (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
}
static inline bool system_supports_64kb_granule(void)
@@ -669,7 +670,8 @@ static inline bool system_supports_64kb_granule(void)
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN64_SHIFT);
- return val == ID_AA64MMFR0_TGRAN64_SUPPORTED;
+ return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
+ (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
}
static inline bool system_supports_16kb_granule(void)
@@ -681,7 +683,8 @@ static inline bool system_supports_16kb_granule(void)
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN16_SHIFT);
- return val == ID_AA64MMFR0_TGRAN16_SUPPORTED;
+ return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
+ (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
}
static inline bool system_supports_mixed_endian_el0(void)
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 21fa330f498d..29bcd791f793 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -150,8 +150,17 @@
ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
cbz x1, .Lskip_fgt_\@
- msr_s SYS_HDFGRTR_EL2, xzr
- msr_s SYS_HDFGWTR_EL2, xzr
+ mov x0, xzr
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+ cmp x1, #3
+ b.lt .Lset_fgt_\@
+ /* Disable PMSNEVFR_EL1 read and write traps */
+ orr x0, x0, #(1 << 62)
+
+.Lset_fgt_\@:
+ msr_s SYS_HDFGRTR_EL2, x0
+ msr_s SYS_HDFGWTR_EL2, x0
msr_s SYS_HFGRTR_EL2, xzr
msr_s SYS_HFGWTR_EL2, xzr
msr_s SYS_HFGITR_EL2, xzr
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index c072161d5c65..9a62884183e5 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -45,7 +45,6 @@ extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
-extern void fpsimd_bind_task_to_cpu(void);
extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
void *sve_state, unsigned int sve_vl);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 059204477ce6..00a2c0b69c2b 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -94,6 +94,7 @@
.endm
/* SVE instruction encodings for non-SVE-capable assemblers */
+/* (pre binutils 2.28, all kernel capable clang versions support SVE) */
/* STR (vector): STR Z\nz, [X\nxbase, #\offset, MUL VL] */
.macro _sve_str_v nz, nxbase, offset=0
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 3512184cfec1..96dc0f7da258 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -65,8 +65,8 @@
#define EARLY_KASLR (0)
#endif
-#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \
- - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+#define EARLY_ENTRIES(vstart, vend, shift) \
+ ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
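
Illustration (not part of the patch): the EARLY_ENTRIES() change above fixes
an off-by-one when vend is exactly aligned to a 1 << shift boundary, which
is what head.S relies on for map_memory (see the "[vstart, vend - 1]" comment
update in that file). A host-side sketch with assumed example values:

/* Old vs new EARLY_ENTRIES() for a boundary-aligned end address. */
#include <stdio.h>

#define EARLY_KASLR 0UL	/* non-KASLR configuration, as in the header */

#define OLD_EARLY_ENTRIES(vstart, vend, shift) \
	(((vend) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
#define NEW_EARLY_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)

int main(void)
{
	unsigned long vstart = 0x40000000UL;
	unsigned long vend   = 0x40200000UL;	/* vstart + one 2MiB block */
	int shift = 21;				/* 2MiB section shift */

	printf("old: %lu\n", OLD_EARLY_ENTRIES(vstart, vend, shift));	/* 2 */
	printf("new: %lu\n", NEW_EARLY_ENTRIES(vstart, vend, shift));	/* 1 */
	return 0;
}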
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 824a3655dd93..f1745a843414 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -243,9 +243,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging_sync() mte_enable_kernel_sync()
#define arch_enable_tagging_async() mte_enable_kernel_async()
-#define arch_set_tagging_report_once(state) mte_set_report_once(state)
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
-#define arch_init_tags(max_tag) mte_init_tags(max_tag)
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
#define arch_set_mem_tag_range(addr, size, tag, init) \
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 75beffe2ee8a..e9c30859f80c 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -27,11 +27,32 @@ typedef struct {
} mm_context_t;
/*
- * This macro is only used by the TLBI and low-level switch_mm() code,
- * neither of which can race with an ASID change. We therefore don't
- * need to reload the counter using atomic64_read().
+ * We use atomic64_read() here because the ASID for an 'mm_struct' can
+ * be reallocated when scheduling one of its threads following a
+ * rollover event (see new_context() and flush_context()). In this case,
+ * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
+ * may use a stale ASID. This is fine in principle as the new ASID is
+ * guaranteed to be clean in the TLB, but the TLBI routines have to take
+ * care to handle the following race:
+ *
+ * CPU 0 CPU 1 CPU 2
+ *
+ * // ptep_clear_flush(mm)
+ * xchg_relaxed(pte, 0)
+ * DSB ISHST
+ * old = ASID(mm)
+ * | <rollover>
+ * | new = new_context(mm)
+ * \-----------------> atomic_set(mm->context.id, new)
+ * cpu_switch_mm(mm)
+ * // Hardware walk of pte using new ASID
+ * TLBI(old)
+ *
+ * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
+ * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
+ * written by CPU 0.
*/
-#define ASID(mm) ((mm)->context.id.counter & 0xffff)
+#define ASID(mm) (atomic64_read(&(mm)->context.id) & 0xffff)
static inline bool arm64_kernel_unmapped_at_el0(void)
{
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index d952352bd008..22420e1f8c03 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -130,10 +130,6 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
-void mte_init_tags(u64 max_tag);
-
-void mte_set_report_once(bool state);
-bool mte_report_once(void);
#else /* CONFIG_ARM64_MTE */
@@ -165,19 +161,6 @@ static inline void mte_enable_kernel_async(void)
{
}
-static inline void mte_init_tags(u64 max_tag)
-{
-}
-
-static inline void mte_set_report_once(bool state)
-{
-}
-
-static inline bool mte_report_once(void)
-{
- return false;
-}
-
#endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 58c7f80f5596..3f93b9e0b339 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -16,8 +16,6 @@
#include <asm/pgtable-types.h>
-extern u64 gcr_kernel_excl;
-
void mte_clear_page_tags(void *addr);
unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
unsigned long n);
@@ -43,7 +41,6 @@ void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
void mte_suspend_enter(void);
-void mte_suspend_exit(void);
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
@@ -72,9 +69,6 @@ static inline void mte_thread_switch(struct task_struct *next)
static inline void mte_suspend_enter(void)
{
}
-static inline void mte_suspend_exit(void)
-{
-}
static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
return 0;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f09bf5c02891..dfa76afa0ccf 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -715,7 +715,7 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
return (pud_t *)__va(p4d_page_paddr(p4d));
}
-/* Find an entry in the frst-level page table. */
+/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr) (p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index 28a78b67d9b4..efb098de3a84 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -10,6 +10,9 @@
#include <asm/memory.h>
#include <asm/sysreg.h>
+#define PR_PAC_ENABLED_KEYS_MASK \
+ (PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
+
#ifdef CONFIG_ARM64_PTR_AUTH
/*
* Each key is a 128-bit quantity which is split across a pair of 64-bit
@@ -117,9 +120,9 @@ static __always_inline void ptrauth_enable(void)
\
/* enable all keys */ \
if (system_supports_address_auth()) \
- set_task_sctlr_el1(current->thread.sctlr_user | \
- SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
- SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); \
+ ptrauth_set_enabled_keys(current, \
+ PR_PAC_ENABLED_KEYS_MASK, \
+ PR_PAC_ENABLED_KEYS_MASK); \
} while (0)
#define ptrauth_thread_switch_user(tsk) \
@@ -146,7 +149,4 @@ static __always_inline void ptrauth_enable(void)
#define ptrauth_thread_switch_kernel(tsk)
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
-#define PR_PAC_ENABLED_KEYS_MASK \
- (PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
-
#endif /* __ASM_POINTER_AUTH_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index b6517fd03d7b..ee2bdc1b9f5b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -16,6 +16,12 @@
*/
#define NET_IP_ALIGN 0
+#define MTE_CTRL_GCR_USER_EXCL_SHIFT 0
+#define MTE_CTRL_GCR_USER_EXCL_MASK 0xffff
+
+#define MTE_CTRL_TCF_SYNC (1UL << 16)
+#define MTE_CTRL_TCF_ASYNC (1UL << 17)
+
#ifndef __ASSEMBLY__
#include <linux/build_bug.h>
@@ -153,7 +159,7 @@ struct thread_struct {
#endif
#endif
#ifdef CONFIG_ARM64_MTE
- u64 gcr_user_excl;
+ u64 mte_ctrl;
#endif
u64 sctlr_user;
};
@@ -253,7 +259,7 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
-void set_task_sctlr_el1(u64 sctlr);
+void update_sctlr_el1(u64 sctlr);
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index 89cba2622b79..6a75d7ecdcaa 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -37,7 +37,7 @@ static __must_check inline bool may_use_simd(void)
*/
return !WARN_ON(!system_capabilities_finalized()) &&
system_supports_fpsimd() &&
- !in_irq() && !irqs_disabled() && !in_nmi() &&
+ !in_hardirq() && !irqs_disabled() && !in_nmi() &&
!this_cpu_read(fpsimd_context_busy);
}
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 7b9c3acba684..f2e06e7c0a31 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -11,6 +11,7 @@
#include <linux/bits.h>
#include <linux/stringify.h>
+#include <linux/kasan-tags.h>
/*
* ARMv8 ARM reserves the following encoding for system registers:
@@ -698,8 +699,7 @@
(SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \
SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
- SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
- SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
+ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
/* MAIR_ELx memory attributes (used by Linux) */
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
@@ -847,12 +847,16 @@
#define ID_AA64MMFR0_ASID_SHIFT 4
#define ID_AA64MMFR0_PARANGE_SHIFT 0
-#define ID_AA64MMFR0_TGRAN4_NI 0xf
-#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0
-#define ID_AA64MMFR0_TGRAN64_NI 0xf
-#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
-#define ID_AA64MMFR0_TGRAN16_NI 0x0
-#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
+#define ID_AA64MMFR0_TGRAN4_NI 0xf
+#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_TGRAN64_NI 0xf
+#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_TGRAN16_NI 0x0
+#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1
+#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf
+
#define ID_AA64MMFR0_PARANGE_48 0x5
#define ID_AA64MMFR0_PARANGE_52 0x6
@@ -1028,16 +1032,16 @@
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0xF
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX
#elif defined(CONFIG_ARM64_64K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX
#endif
#define MVFR2_FPMISC_SHIFT 4
@@ -1067,6 +1071,21 @@
#define SYS_GCR_EL1_RRND (BIT(16))
#define SYS_GCR_EL1_EXCL_MASK 0xffffUL
+#ifdef CONFIG_KASAN_HW_TAGS
+/*
+ * KASAN always uses a whole byte for its tags. With CONFIG_KASAN_HW_TAGS it
+ * only uses tags in the range 0xF0-0xFF, which we map to MTE tags 0x0-0xF.
+ */
+#define __MTE_TAG_MIN (KASAN_TAG_MIN & 0xf)
+#define __MTE_TAG_MAX (KASAN_TAG_MAX & 0xf)
+#define __MTE_TAG_INCL GENMASK(__MTE_TAG_MAX, __MTE_TAG_MIN)
+#define KERNEL_GCR_EL1_EXCL (SYS_GCR_EL1_EXCL_MASK & ~__MTE_TAG_INCL)
+#else
+#define KERNEL_GCR_EL1_EXCL SYS_GCR_EL1_EXCL_MASK
+#endif
+
+#define KERNEL_GCR_EL1 (SYS_GCR_EL1_RRND | KERNEL_GCR_EL1_EXCL)
+
/* RGSR_EL1 Definitions */
#define SYS_RGSR_EL1_TAG_MASK 0xfUL
#define SYS_RGSR_EL1_SEED_SHIFT 8
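
Illustration (not part of the patch): with CONFIG_KASAN_HW_TAGS, the new
KERNEL_GCR_EL1_EXCL evaluates to 0xc000, i.e. only MTE tags 0xe and 0xf are
excluded from random generation, keeping them free for KASAN_TAG_INVALID and
KASAN_TAG_KERNEL. A host-side sketch (assumes 64-bit long; tag values come
from the include/linux/kasan-tags.h file added later in this diff):

/* Recompute KERNEL_GCR_EL1_EXCL outside the kernel, for inspection. */
#include <stdio.h>

#define GENMASK(h, l)		(((~0UL) << (l)) & (~0UL >> (63 - (h))))
#define KASAN_TAG_MAX		0xFD	/* maximum value for random tags */
#define KASAN_TAG_MIN		0xF0	/* minimum value for random tags */
#define SYS_GCR_EL1_EXCL_MASK	0xffffUL

int main(void)
{
	unsigned long mte_min = KASAN_TAG_MIN & 0xf;	/* 0x0 */
	unsigned long mte_max = KASAN_TAG_MAX & 0xf;	/* 0xd */
	unsigned long incl = GENMASK(mte_max, mte_min);	/* 0x3fff */
	unsigned long excl = SYS_GCR_EL1_EXCL_MASK & ~incl;

	printf("KERNEL_GCR_EL1_EXCL = 0x%lx\n", excl);	/* 0xc000 */
	return 0;
}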
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index cc3f5a33ff9c..412a3b9a3c25 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -245,9 +245,10 @@ static inline void flush_tlb_all(void)
static inline void flush_tlb_mm(struct mm_struct *mm)
{
- unsigned long asid = __TLBI_VADDR(0, ASID(mm));
+ unsigned long asid;
dsb(ishst);
+ asid = __TLBI_VADDR(0, ASID(mm));
__tlbi(aside1is, asid);
__tlbi_user(aside1is, asid);
dsb(ish);
@@ -256,9 +257,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
unsigned long uaddr)
{
- unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
+ unsigned long addr;
dsb(ishst);
+ addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
__tlbi(vale1is, addr);
__tlbi_user(vale1is, addr);
}
@@ -283,9 +285,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
{
int num = 0;
int scale = 0;
- unsigned long asid = ASID(vma->vm_mm);
- unsigned long addr;
- unsigned long pages;
+ unsigned long asid, addr, pages;
start = round_down(start, stride);
end = round_up(end, stride);
@@ -305,10 +305,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
}
dsb(ishst);
+ asid = ASID(vma->vm_mm);
/*
* When the CPU does not support TLB range operations, flush the TLB
- * entries one by one at the granularity of 'stride'. If the the TLB
+ * entries one by one at the granularity of 'stride'. If the TLB
* range ops are supported, then:
*
* 1. If 'pages' is odd, flush the first page through non-range
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index c85670692afa..551427ae8cc5 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -52,7 +52,7 @@ int main(void)
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
#endif
#ifdef CONFIG_ARM64_MTE
- DEFINE(THREAD_GCR_EL1_USER, offsetof(struct task_struct, thread.gcr_user_excl));
+ DEFINE(THREAD_MTE_CTRL, offsetof(struct task_struct, thread.mte_ctrl));
#endif
BLANK();
DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0ead8bfedf20..23b1e3d83603 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1561,8 +1561,6 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
if (!cpu)
arm64_use_ng_mappings = true;
-
- return;
}
#else
static void
@@ -1734,7 +1732,7 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
u64 val = read_sysreg_s(SYS_CLIDR_EL1);
/* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
- WARN_ON(val & (7 << 27 | 7 << 21));
+ WARN_ON(CLIDR_LOUU(val) || CLIDR_LOUIS(val));
}
#ifdef CONFIG_ARM64_PAN
@@ -1843,6 +1841,9 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
#ifdef CONFIG_ARM64_MTE
static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
{
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
+ isb();
+
/*
* Clear the tags in the zero page. This needs to be done via the
* linear map which has the Tagged attribute.
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 0a7a64753878..196e921f61de 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -33,11 +33,24 @@ SYM_FUNC_END(fpsimd_load_state)
#ifdef CONFIG_ARM64_SVE
+/*
+ * Save the SVE state
+ *
+ * x0 - pointer to buffer for state
+ * x1 - pointer to storage for FPSR
+ */
SYM_FUNC_START(sve_save_state)
sve_save 0, x1, 2
ret
SYM_FUNC_END(sve_save_state)
+/*
+ * Load the SVE state
+ *
+ * x0 - pointer to buffer for state
+ * x1 - pointer to storage for FPSR
+ * x2 - VQ-1
+ */
SYM_FUNC_START(sve_load_state)
sve_load 0, x1, x2, 3, x4
ret
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 863d44f73028..572f72f20646 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -133,42 +133,46 @@ alternative_cb_end
.endm
/* Check for MTE asynchronous tag check faults */
- .macro check_mte_async_tcf, tmp, ti_flags
+ .macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
.arch_extension lse
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
+ /*
+ * Asynchronous tag check faults are only possible in ASYNC (2) or
+ * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
+ * set, so skip the check if it is unset.
+ */
+ tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
mrs_s \tmp, SYS_TFSRE0_EL1
tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
mov \tmp, #_TIF_MTE_ASYNC_FAULT
add \ti_flags, tsk, #TSK_TI_FLAGS
stset \tmp, [\ti_flags]
- msr_s SYS_TFSRE0_EL1, xzr
1:
#endif
.endm
/* Clear the MTE asynchronous tag check faults */
- .macro clear_mte_async_tcf
+ .macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
+ /* See comment in check_mte_async_tcf above. */
+ tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
dsb ish
msr_s SYS_TFSRE0_EL1, xzr
+1:
alternative_else_nop_endif
#endif
.endm
- .macro mte_set_gcr, tmp, tmp2
+ .macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
- /*
- * Calculate and set the exclude mask preserving
- * the RRND (bit[16]) setting.
- */
- mrs_s \tmp2, SYS_GCR_EL1
- bfi \tmp2, \tmp, #0, #16
- msr_s SYS_GCR_EL1, \tmp2
+ ubfx \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
+ orr \tmp, \tmp, #SYS_GCR_EL1_RRND
+ msr_s SYS_GCR_EL1, \tmp
#endif
.endm
@@ -177,10 +181,8 @@ alternative_else_nop_endif
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
- ldr_l \tmp, gcr_kernel_excl
-
- mte_set_gcr \tmp, \tmp2
- isb
+ mov \tmp, KERNEL_GCR_EL1
+ msr_s SYS_GCR_EL1, \tmp
1:
#endif
.endm
@@ -190,7 +192,7 @@ alternative_else_nop_endif
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
- ldr \tmp, [\tsk, #THREAD_GCR_EL1_USER]
+ ldr \tmp, [\tsk, #THREAD_MTE_CTRL]
mte_set_gcr \tmp, \tmp2
1:
@@ -231,8 +233,8 @@ alternative_else_nop_endif
disable_step_tsk x19, x20
/* Check for asynchronous tag check faults in user space */
- check_mte_async_tcf x22, x23
- apply_ssbd 1, x22, x23
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ check_mte_async_tcf x22, x23, x0
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -245,7 +247,6 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
* was disabled on kernel exit then we would have left the kernel IA
* installed so there is no need to install it again.
*/
- ldr x0, [tsk, THREAD_SCTLR_USER]
tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
b 2f
@@ -254,12 +255,26 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
orr x0, x0, SCTLR_ELx_ENIA
msr sctlr_el1, x0
2:
- isb
alternative_else_nop_endif
#endif
+ apply_ssbd 1, x22, x23
+
mte_set_kernel_gcr x22, x23
+ /*
+ * Any non-self-synchronizing system register updates required for
+ * kernel entry should be placed before this point.
+ */
+alternative_if ARM64_MTE
+ isb
+ b 1f
+alternative_else_nop_endif
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ isb
+alternative_else_nop_endif
+1:
+
scs_load tsk
.else
add x21, sp, #PT_REGS_SIZE
@@ -362,6 +377,10 @@ alternative_else_nop_endif
3:
scs_save tsk
+ /* Ignore asynchronous tag check faults in the uaccess routines */
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ clear_mte_async_tcf x0
+
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
/*
@@ -371,7 +390,6 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
*
* No kernel C function calls after this.
*/
- ldr x0, [tsk, THREAD_SCTLR_USER]
tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
__ptrauth_keys_install_user tsk, x0, x1, x2
b 2f
@@ -517,12 +535,13 @@ SYM_CODE_START(vectors)
SYM_CODE_END(vectors)
#ifdef CONFIG_VMAP_STACK
+SYM_CODE_START_LOCAL(__bad_stack)
/*
* We detected an overflow in kernel_ventry, which switched to the
* overflow stack. Stash the exception regs, and head to our overflow
* handler.
*/
-__bad_stack:
+
/* Restore the original x0 value */
mrs x0, tpidrro_el0
@@ -542,6 +561,7 @@ __bad_stack:
/* Time to die */
bl handle_bad_stack
ASM_BUG()
+SYM_CODE_END(__bad_stack)
#endif /* CONFIG_VMAP_STACK */
@@ -599,8 +619,6 @@ SYM_CODE_START_LOCAL(ret_to_user)
cbnz x2, work_pending
finish_ret_to_user:
user_enter_irqoff
- /* Ignore asynchronous tag check faults in the uaccess routines */
- clear_mte_async_tcf
enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
bl stackleak_erase
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index e57b23f95284..5a294f20e9de 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -162,6 +162,8 @@ extern void __percpu *efi_sve_state;
DEFINE_PER_CPU(bool, fpsimd_context_busy);
EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
+static void fpsimd_bind_task_to_cpu(void);
+
static void __get_cpu_fpsimd_context(void)
{
bool busy = __this_cpu_xchg(fpsimd_context_busy, true);
@@ -518,12 +520,6 @@ void sve_alloc(struct task_struct *task)
/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
task->thread.sve_state =
kzalloc(sve_state_size(task), GFP_KERNEL);
-
- /*
- * If future SVE revisions can have larger vectors though,
- * this may cease to be true:
- */
- BUG_ON(!task->thread.sve_state);
}
@@ -943,6 +939,10 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
}
sve_alloc(current);
+ if (!current->thread.sve_state) {
+ force_sig(SIGKILL);
+ return;
+ }
get_cpu_fpsimd_context();
@@ -1112,7 +1112,7 @@ void fpsimd_signal_preserve_current_state(void)
* The caller must have ownership of the cpu FPSIMD context before calling
* this function.
*/
-void fpsimd_bind_task_to_cpu(void)
+static void fpsimd_bind_task_to_cpu(void)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c5c994a73a64..17962452e31d 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -177,7 +177,7 @@ SYM_CODE_END(preserve_boot_args)
* to be composed of multiple pages. (This effectively scales the end index).
*
* vstart: virtual address of start of range
- * vend: virtual address of end of range
+ * vend: virtual address of end of range - we map [vstart, vend]
* shift: shift used to transform virtual address into index
* ptrs: number of entries in page table
* istart: index in table corresponding to vstart
@@ -214,17 +214,18 @@ SYM_CODE_END(preserve_boot_args)
*
* tbl: location of page table
* rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
- * vstart: start address to map
- * vend: end address to map - we map [vstart, vend]
+ * vstart: virtual address of start of range
+ * vend: virtual address of end of range - we map [vstart, vend - 1]
* flags: flags to use to map last level entries
* phys: physical address corresponding to vstart - physical memory is contiguous
* pgds: the number of pgd entries
*
* Temporaries: istart, iend, tmp, count, sv - these need to be different registers
- * Preserves: vstart, vend, flags
- * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
+ * Preserves: vstart, flags
+ * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv
*/
.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
+ sub \vend, \vend, #1
add \rtbl, \tbl, #PAGE_SIZE
mov \sv, \rtbl
mov \count, #0
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 53a381a7f65d..d8e606fe3c21 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -54,6 +54,7 @@ static const struct ftr_set_desc pfr1 __initconst = {
.override = &id_aa64pfr1_override,
.fields = {
{ "bt", ID_AA64PFR1_BT_SHIFT },
+ { "mte", ID_AA64PFR1_MTE_SHIFT},
{}
},
};
@@ -100,6 +101,7 @@ static const struct {
{ "arm64.nopauth",
"id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 "
"id_aa64isar1.api=0 id_aa64isar1.apa=0" },
+ { "arm64.nomte", "id_aa64pfr1.mte=0" },
{ "nokaslr", "kaslr.disabled=1" },
};
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 36f51b0e438a..9d314a3bad3b 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
@@ -22,9 +23,7 @@
#include <asm/ptrace.h>
#include <asm/sysreg.h>
-u64 gcr_kernel_excl __ro_after_init;
-
-static bool report_fault_once = true;
+static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
@@ -101,26 +100,6 @@ int memcmp_pages(struct page *page1, struct page *page2)
return ret;
}
-void mte_init_tags(u64 max_tag)
-{
- static bool gcr_kernel_excl_initialized;
-
- if (!gcr_kernel_excl_initialized) {
- /*
- * The format of the tags in KASAN is 0xFF and in MTE is 0xF.
- * This conversion extracts an MTE tag from a KASAN tag.
- */
- u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
- max_tag), 0);
-
- gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
- gcr_kernel_excl_initialized = true;
- }
-
- /* Enable the kernel exclude mask for random tags generation. */
- write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
-}
-
static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
/* Enable MTE Sync Mode for EL1. */
@@ -160,16 +139,6 @@ void mte_enable_kernel_async(void)
}
#endif
-void mte_set_report_once(bool state)
-{
- WRITE_ONCE(report_fault_once, state);
-}
-
-bool mte_report_once(void)
-{
- return READ_ONCE(report_fault_once);
-}
-
#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
@@ -193,14 +162,26 @@ void mte_check_tfsr_el1(void)
}
#endif
-static void set_gcr_el1_excl(u64 excl)
+static void mte_update_sctlr_user(struct task_struct *task)
{
- current->thread.gcr_user_excl = excl;
-
/*
- * SYS_GCR_EL1 will be set to current->thread.gcr_user_excl value
- * by mte_set_user_gcr() in kernel_exit,
+ * This must be called with preemption disabled and can only be called
+ * on the current or next task since the CPU must match where the thread
+ * is going to run. The caller is responsible for calling
+ * update_sctlr_el1() later in the same preemption disabled block.
*/
+ unsigned long sctlr = task->thread.sctlr_user;
+ unsigned long mte_ctrl = task->thread.mte_ctrl;
+ unsigned long pref, resolved_mte_tcf;
+
+ pref = __this_cpu_read(mte_tcf_preferred);
+ resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
+ sctlr &= ~SCTLR_EL1_TCF0_MASK;
+ if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
+ sctlr |= SCTLR_EL1_TCF0_ASYNC;
+ else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
+ sctlr |= SCTLR_EL1_TCF0_SYNC;
+ task->thread.sctlr_user = sctlr;
}
void mte_thread_init_user(void)
@@ -212,15 +193,14 @@ void mte_thread_init_user(void)
dsb(ish);
write_sysreg_s(0, SYS_TFSRE0_EL1);
clear_thread_flag(TIF_MTE_ASYNC_FAULT);
- /* disable tag checking */
- set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
- SCTLR_EL1_TCF0_NONE);
- /* reset tag generation mask */
- set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
+ /* disable tag checking and reset tag generation mask */
+ set_mte_ctrl(current, 0);
}
void mte_thread_switch(struct task_struct *next)
{
+ mte_update_sctlr_user(next);
+
/*
* Check if an async tag exception occurred at EL1.
*
@@ -248,44 +228,25 @@ void mte_suspend_enter(void)
mte_check_tfsr_el1();
}
-void mte_suspend_exit(void)
-{
- if (!system_supports_mte())
- return;
-
- sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl);
- isb();
-}
-
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
- u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
- u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
- SYS_GCR_EL1_EXCL_MASK;
+ u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
+ SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;
if (!system_supports_mte())
return 0;
- switch (arg & PR_MTE_TCF_MASK) {
- case PR_MTE_TCF_NONE:
- sctlr |= SCTLR_EL1_TCF0_NONE;
- break;
- case PR_MTE_TCF_SYNC:
- sctlr |= SCTLR_EL1_TCF0_SYNC;
- break;
- case PR_MTE_TCF_ASYNC:
- sctlr |= SCTLR_EL1_TCF0_ASYNC;
- break;
- default:
- return -EINVAL;
- }
-
- if (task != current) {
- task->thread.sctlr_user = sctlr;
- task->thread.gcr_user_excl = gcr_excl;
- } else {
- set_task_sctlr_el1(sctlr);
- set_gcr_el1_excl(gcr_excl);
+ if (arg & PR_MTE_TCF_ASYNC)
+ mte_ctrl |= MTE_CTRL_TCF_ASYNC;
+ if (arg & PR_MTE_TCF_SYNC)
+ mte_ctrl |= MTE_CTRL_TCF_SYNC;
+
+ task->thread.mte_ctrl = mte_ctrl;
+ if (task == current) {
+ preempt_disable();
+ mte_update_sctlr_user(task);
+ update_sctlr_el1(task->thread.sctlr_user);
+ preempt_enable();
}
return 0;
@@ -294,24 +255,18 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
long get_mte_ctrl(struct task_struct *task)
{
unsigned long ret;
- u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
+ u64 mte_ctrl = task->thread.mte_ctrl;
+ u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
+ SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte())
return 0;
ret = incl << PR_MTE_TAG_SHIFT;
-
- switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
- case SCTLR_EL1_TCF0_NONE:
- ret |= PR_MTE_TCF_NONE;
- break;
- case SCTLR_EL1_TCF0_SYNC:
- ret |= PR_MTE_TCF_SYNC;
- break;
- case SCTLR_EL1_TCF0_ASYNC:
+ if (mte_ctrl & MTE_CTRL_TCF_ASYNC)
ret |= PR_MTE_TCF_ASYNC;
- break;
- }
+ if (mte_ctrl & MTE_CTRL_TCF_SYNC)
+ ret |= PR_MTE_TCF_SYNC;
return ret;
}
@@ -450,3 +405,54 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
return ret;
}
+
+static ssize_t mte_tcf_preferred_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ switch (per_cpu(mte_tcf_preferred, dev->id)) {
+ case MTE_CTRL_TCF_ASYNC:
+ return sysfs_emit(buf, "async\n");
+ case MTE_CTRL_TCF_SYNC:
+ return sysfs_emit(buf, "sync\n");
+ default:
+ return sysfs_emit(buf, "???\n");
+ }
+}
+
+static ssize_t mte_tcf_preferred_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 tcf;
+
+ if (sysfs_streq(buf, "async"))
+ tcf = MTE_CTRL_TCF_ASYNC;
+ else if (sysfs_streq(buf, "sync"))
+ tcf = MTE_CTRL_TCF_SYNC;
+ else
+ return -EINVAL;
+
+ device_lock(dev);
+ per_cpu(mte_tcf_preferred, dev->id) = tcf;
+ device_unlock(dev);
+
+ return count;
+}
+static DEVICE_ATTR_RW(mte_tcf_preferred);
+
+static int register_mte_tcf_preferred_sysctl(void)
+{
+ unsigned int cpu;
+
+ if (!system_supports_mte())
+ return 0;
+
+ for_each_possible_cpu(cpu) {
+ per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
+ device_create_file(get_cpu_device(cpu),
+ &dev_attr_mte_tcf_preferred);
+ }
+
+ return 0;
+}
+subsys_initcall(register_mte_tcf_preferred_sysctl);
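
Illustration (not part of the patch): the core of mte_update_sctlr_user()
above is the resolution step "resolved_mte_tcf = (mte_ctrl & pref) ? pref :
mte_ctrl" - if the CPU's preferred mode is among the modes the task asked
for, the preference wins; otherwise the task's own bits are used, with ASYNC
taking priority over SYNC. A stand-alone sketch of that logic:

/* Model the tag check fault mode resolution done on context switch. */
#include <stdio.h>

#define MTE_CTRL_TCF_SYNC	(1UL << 16)
#define MTE_CTRL_TCF_ASYNC	(1UL << 17)

static const char *resolve(unsigned long mte_ctrl, unsigned long pref)
{
	unsigned long resolved = (mte_ctrl & pref) ? pref : mte_ctrl;

	if (resolved & MTE_CTRL_TCF_ASYNC)
		return "async";
	if (resolved & MTE_CTRL_TCF_SYNC)
		return "sync";
	return "none";
}

int main(void)
{
	unsigned long both = MTE_CTRL_TCF_SYNC | MTE_CTRL_TCF_ASYNC;

	printf("%s\n", resolve(both, MTE_CTRL_TCF_SYNC));	/* sync  */
	printf("%s\n", resolve(both, MTE_CTRL_TCF_ASYNC));	/* async */
	printf("%s\n", resolve(MTE_CTRL_TCF_SYNC, MTE_CTRL_TCF_ASYNC)); /* sync */
	return 0;
}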
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index d07788dad388..b4044469527e 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1055,7 +1055,7 @@ static void __armv8pmu_probe_pmu(void *info)
dfr0 = read_sysreg(id_aa64dfr0_el1);
pmuver = cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_PMUVER_SHIFT);
- if (pmuver == 0xf || pmuver == 0)
+ if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0)
return;
cpu_pmu->pmuver = pmuver;
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
index 60901ab0a7fe..2708b620b4ae 100644
--- a/arch/arm64/kernel/pointer_auth.c
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -67,7 +67,7 @@ static u64 arg_to_enxx_mask(unsigned long arg)
int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
unsigned long enabled)
{
- u64 sctlr = tsk->thread.sctlr_user;
+ u64 sctlr;
if (!system_supports_address_auth())
return -EINVAL;
@@ -78,12 +78,14 @@ int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys))
return -EINVAL;
+ preempt_disable();
+ sctlr = tsk->thread.sctlr_user;
sctlr &= ~arg_to_enxx_mask(keys);
sctlr |= arg_to_enxx_mask(enabled);
+ tsk->thread.sctlr_user = sctlr;
if (tsk == current)
- set_task_sctlr_el1(sctlr);
- else
- tsk->thread.sctlr_user = sctlr;
+ update_sctlr_el1(sctlr);
+ preempt_enable();
return 0;
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c8989b999250..5464d575192b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -163,7 +163,7 @@ static void print_pstate(struct pt_regs *regs)
u64 pstate = regs->pstate;
if (compat_user_mode(regs)) {
- printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
+ printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
pstate,
pstate & PSR_AA32_N_BIT ? 'N' : 'n',
pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
@@ -174,12 +174,14 @@ static void print_pstate(struct pt_regs *regs)
pstate & PSR_AA32_E_BIT ? "BE" : "LE",
pstate & PSR_AA32_A_BIT ? 'A' : 'a',
pstate & PSR_AA32_I_BIT ? 'I' : 'i',
- pstate & PSR_AA32_F_BIT ? 'F' : 'f');
+ pstate & PSR_AA32_F_BIT ? 'F' : 'f',
+ pstate & PSR_AA32_DIT_BIT ? '+' : '-',
+ pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
} else {
const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
PSR_BTYPE_SHIFT];
- printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n",
+ printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
pstate,
pstate & PSR_N_BIT ? 'N' : 'n',
pstate & PSR_Z_BIT ? 'Z' : 'z',
@@ -192,6 +194,8 @@ static void print_pstate(struct pt_regs *regs)
pstate & PSR_PAN_BIT ? '+' : '-',
pstate & PSR_UAO_BIT ? '+' : '-',
pstate & PSR_TCO_BIT ? '+' : '-',
+ pstate & PSR_DIT_BIT ? '+' : '-',
+ pstate & PSR_SSBS_BIT ? '+' : '-',
btype_str);
}
}
@@ -477,7 +481,13 @@ static void compat_thread_switch(struct task_struct *next)
set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
}
-static void update_sctlr_el1(u64 sctlr)
+/*
+ * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
+ * this function must be called with preemption disabled and the update to
+ * sctlr_user must be made in the same preemption disabled block so that
+ * __switch_to() does not see the variable update before the SCTLR_EL1 one.
+ */
+void update_sctlr_el1(u64 sctlr)
{
/*
* EnIA must not be cleared while in the kernel as this is necessary for
@@ -489,19 +499,6 @@ static void update_sctlr_el1(u64 sctlr)
isb();
}
-void set_task_sctlr_el1(u64 sctlr)
-{
- /*
- * __switch_to() checks current->thread.sctlr as an
- * optimisation. Disable preemption so that it does not see
- * the variable update before the SCTLR_EL1 one.
- */
- preempt_disable();
- current->thread.sctlr_user = sctlr;
- update_sctlr_el1(sctlr);
- preempt_enable();
-}
-
/*
* Thread switching.
*/
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 499b6b2f9757..956e2df27180 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -845,6 +845,11 @@ static int sve_set(struct task_struct *target,
}
sve_alloc(target);
+ if (!target->thread.sve_state) {
+ ret = -ENOMEM;
+ clear_tsk_thread_flag(target, TIF_SVE);
+ goto out;
+ }
/*
* Ensure target->thread.sve_state is up to date with target's
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index f8192f4ae0b8..3b4c927d31ff 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -289,6 +289,11 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
sve_alloc(current);
+ if (!current->thread.sve_state) {
+ clear_thread_flag(TIF_SVE);
+ return -ENOMEM;
+ }
+
err = __copy_from_user(current->thread.sve_state,
(char __user const *)user->sve +
SVE_SIG_REGS_OFFSET,
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 2f507f565c48..4850333669fb 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -46,8 +46,6 @@ struct compat_aux_sigframe {
unsigned long end_magic;
} __attribute__((__aligned__(8)));
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
compat_sigset_t cset;
@@ -190,10 +188,8 @@ static int compat_restore_sigframe(struct pt_regs *regs,
unsigned long psr;
err = get_sigset_t(&set, &sf->uc.uc_sigmask);
- if (err == 0) {
- sigdelsetmask(&set, ~_BLOCKABLE);
+ if (err == 0)
set_current_blocked(&set);
- }
__get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
__get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 938ce6fbee8a..19ee7c33769d 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -76,7 +76,6 @@ void notrace __cpu_suspend_exit(void)
spectre_v4_enable_mitigation(NULL);
/* Restore additional feature-specific configuration */
- mte_suspend_exit();
ptrauth_suspend_exit();
}
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index b506a4b1e38c..fccfe363e567 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -185,7 +185,7 @@ u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
break;
default:
if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
- pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
+ pr_err("%s: unknown immediate encoding %d\n", __func__,
type);
return 0;
}
@@ -215,7 +215,7 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
break;
default:
if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
- pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
+ pr_err("%s: unknown immediate encoding %d\n", __func__,
type);
return AARCH64_BREAK_FAULT;
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 349c488765ca..9ae24e3b72be 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -309,24 +309,11 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
static void report_tag_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- static bool reported;
- bool is_write;
-
- if (READ_ONCE(reported))
- return;
-
- /*
- * This is used for KASAN tests and assumes that no MTE faults
- * happened before running the tests.
- */
- if (mte_report_once())
- WRITE_ONCE(reported, true);
-
/*
* SAS bits aren't set for all faults reported in EL1, so we can't
* find out access size.
*/
- is_write = !!(esr & ESR_ELx_WNR);
+ bool is_write = !!(esr & ESR_ELx_WNR);
kasan_report(addr, 0, is_write, regs->pc);
}
#else
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 35936c5ae1ce..d35c90d2e47a 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -437,8 +437,7 @@ SYM_FUNC_START(__cpu_setup)
mov x10, #MAIR_ATTR_NORMAL_TAGGED
bfi mair, x10, #(8 * MT_NORMAL_TAGGED), #8
- /* initialize GCR_EL1: all non-zero tags excluded by default */
- mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
+ mov x10, #KERNEL_GCR_EL1
msr_s SYS_GCR_EL1, x10
/*
diff --git a/include/linux/kasan-tags.h b/include/linux/kasan-tags.h
new file mode 100644
index 000000000000..4f85f562512c
--- /dev/null
+++ b/include/linux/kasan-tags.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_TAGS_H
+#define _LINUX_KASAN_TAGS_H
+
+#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
+#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
+#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */
+#else
+#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */
+#endif
+
+#endif /* LINUX_KASAN_TAGS_H */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 967d9c55323d..89de78a14b9b 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -234,14 +234,15 @@ struct prctl_mm_map {
#define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
/* MTE tag check fault modes */
-# define PR_MTE_TCF_SHIFT 1
-# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
-# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
-# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
-# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_NONE 0
+# define PR_MTE_TCF_SYNC (1UL << 1)
+# define PR_MTE_TCF_ASYNC (1UL << 2)
+# define PR_MTE_TCF_MASK (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)
/* MTE tag inclusion mask */
# define PR_MTE_TAG_SHIFT 3
# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
+/* Unused; kept only for source compatibility */
+# define PR_MTE_TCF_SHIFT 1
/* Control reclaim behavior when allocating memory */
#define PR_SET_IO_FLUSHER 57
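
Illustration (not part of the patch): although PR_MTE_TCF_SYNC and
PR_MTE_TCF_ASYNC become independent bits, their numeric values (and that of
PR_MTE_TCF_MASK) are unchanged from the old shift-based encoding removed
above, so existing callers passing a single mode keep the same behaviour.
A compile-time check of that equivalence:

/* Old (shift-based) vs new (bitfield) PR_MTE_TCF_* values. */
#define OLD_PR_MTE_TCF_SHIFT	1
#define OLD_PR_MTE_TCF_NONE	(0UL << OLD_PR_MTE_TCF_SHIFT)
#define OLD_PR_MTE_TCF_SYNC	(1UL << OLD_PR_MTE_TCF_SHIFT)
#define OLD_PR_MTE_TCF_ASYNC	(2UL << OLD_PR_MTE_TCF_SHIFT)
#define OLD_PR_MTE_TCF_MASK	(3UL << OLD_PR_MTE_TCF_SHIFT)

#define NEW_PR_MTE_TCF_NONE	0UL
#define NEW_PR_MTE_TCF_SYNC	(1UL << 1)
#define NEW_PR_MTE_TCF_ASYNC	(1UL << 2)
#define NEW_PR_MTE_TCF_MASK	(NEW_PR_MTE_TCF_SYNC | NEW_PR_MTE_TCF_ASYNC)

_Static_assert(OLD_PR_MTE_TCF_NONE == NEW_PR_MTE_TCF_NONE, "NONE");
_Static_assert(OLD_PR_MTE_TCF_SYNC == NEW_PR_MTE_TCF_SYNC, "SYNC");
_Static_assert(OLD_PR_MTE_TCF_ASYNC == NEW_PR_MTE_TCF_ASYNC, "ASYNC");
_Static_assert(OLD_PR_MTE_TCF_MASK == NEW_PR_MTE_TCF_MASK, "MASK");

int main(void) { return 0; }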
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 8f7b0b2f6e11..8be9d4b3b259 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -53,7 +53,6 @@ static int kasan_test_init(struct kunit *test)
}
multishot = kasan_save_enable_multi_shot();
- kasan_set_tagging_report_once(false);
fail_data.report_found = false;
kunit_add_named_resource(test, NULL, NULL, &resource,
"kasan_data", &fail_data);
@@ -62,7 +61,6 @@ static int kasan_test_init(struct kunit *test)
static void kasan_test_exit(struct kunit *test)
{
- kasan_set_tagging_report_once(true);
kasan_restore_multi_shot(multishot);
KUNIT_EXPECT_FALSE(test, fail_data.report_found);
}
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 4ea8c368b5b8..e4c16f6b6680 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -142,8 +142,6 @@ void kasan_init_hw_tags_cpu(void)
if (kasan_arg == KASAN_ARG_OFF)
return;
- hw_init_tags(KASAN_TAG_MAX);
-
/*
* Enable async mode only when explicitly requested through
* the command line.
@@ -250,12 +248,6 @@ void kasan_free_pages(struct page *page, unsigned int order)
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
-void kasan_set_tagging_report_once(bool state)
-{
- hw_set_tagging_report_once(state);
-}
-EXPORT_SYMBOL_GPL(kasan_set_tagging_report_once);
-
void kasan_enable_tagging_sync(void)
{
hw_enable_tagging_sync();
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d739cdd1621a..fff93b0bcb08 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -3,6 +3,7 @@
#define __MM_KASAN_KASAN_H
#include <linux/kasan.h>
+#include <linux/kasan-tags.h>
#include <linux/kfence.h>
#include <linux/stackdepot.h>
@@ -51,16 +52,6 @@ extern bool kasan_flag_async __ro_after_init;
#define KASAN_MEMORY_PER_SHADOW_PAGE (KASAN_GRANULE_SIZE << PAGE_SHIFT)
-#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
-#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
-#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
-
-#ifdef CONFIG_KASAN_HW_TAGS
-#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */
-#else
-#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */
-#endif
-
#ifdef CONFIG_KASAN_GENERIC
#define KASAN_FREE_PAGE 0xFF /* page was freed */
#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
@@ -299,12 +290,6 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#ifndef arch_enable_tagging_async
#define arch_enable_tagging_async()
#endif
-#ifndef arch_init_tags
-#define arch_init_tags(max_tag)
-#endif
-#ifndef arch_set_tagging_report_once
-#define arch_set_tagging_report_once(state)
-#endif
#ifndef arch_force_async_tag_fault
#define arch_force_async_tag_fault()
#endif
@@ -320,8 +305,6 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#define hw_enable_tagging_sync() arch_enable_tagging_sync()
#define hw_enable_tagging_async() arch_enable_tagging_async()
-#define hw_init_tags(max_tag) arch_init_tags(max_tag)
-#define hw_set_tagging_report_once(state) arch_set_tagging_report_once(state)
#define hw_force_async_tag_fault() arch_force_async_tag_fault()
#define hw_get_random_tag() arch_get_random_tag()
#define hw_get_mem_tag(addr) arch_get_mem_tag(addr)
@@ -332,19 +315,16 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#define hw_enable_tagging_sync()
#define hw_enable_tagging_async()
-#define hw_set_tagging_report_once(state)
#endif /* CONFIG_KASAN_HW_TAGS */
#if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
-void kasan_set_tagging_report_once(bool state);
void kasan_enable_tagging_sync(void);
void kasan_force_async_fault(void);
#else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
-static inline void kasan_set_tagging_report_once(bool state) { }
static inline void kasan_enable_tagging_sync(void) { }
static inline void kasan_force_async_fault(void) { }