Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/Makefile | 5
-rw-r--r--  arch/arm64/kernel/acpi_parking_protocol.c | 2
-rw-r--r--  arch/arm64/kernel/alternative.c | 92
-rw-r--r--  arch/arm64/kernel/compat_alignment.c | 387
-rw-r--r--  arch/arm64/kernel/cpu-reset.S | 5
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 31
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 371
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 1
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 2
-rw-r--r--  arch/arm64/kernel/efi-rt-wrapper.S | 33
-rw-r--r--  arch/arm64/kernel/efi.c | 78
-rw-r--r--  arch/arm64/kernel/elfcore.c | 16
-rw-r--r--  arch/arm64/kernel/entry-common.c | 35
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 7
-rw-r--r--  arch/arm64/kernel/entry.S | 8
-rw-r--r--  arch/arm64/kernel/ftrace.c | 19
-rw-r--r--  arch/arm64/kernel/head.S | 10
-rw-r--r--  arch/arm64/kernel/hyp-stub.S | 8
-rw-r--r--  arch/arm64/kernel/idreg-override.c | 10
-rw-r--r--  arch/arm64/kernel/image-vars.h | 18
-rw-r--r--  arch/arm64/kernel/irq.c | 14
-rw-r--r--  arch/arm64/kernel/machine_kexec.c | 2
-rw-r--r--  arch/arm64/kernel/module-plts.c | 3
-rw-r--r--  arch/arm64/kernel/module.c | 15
-rw-r--r--  arch/arm64/kernel/mte.c | 60
-rw-r--r--  arch/arm64/kernel/perf_event.c | 8
-rw-r--r--  arch/arm64/kernel/perf_regs.c | 30
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 27
-rw-r--r--  arch/arm64/kernel/process.c | 6
-rw-r--r--  arch/arm64/kernel/proton-pack.c | 22
-rw-r--r--  arch/arm64/kernel/psci.c | 2
-rw-r--r--  arch/arm64/kernel/ptrace.c | 27
-rw-r--r--  arch/arm64/kernel/reloc_test_core.c | 4
-rw-r--r--  arch/arm64/kernel/sdei.c | 32
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 2
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 66
-rw-r--r--  arch/arm64/kernel/suspend.c | 2
-rw-r--r--  arch/arm64/kernel/syscall.c | 2
-rw-r--r--  arch/arm64/kernel/topology.c | 42
-rw-r--r--  arch/arm64/kernel/traps.c | 95
-rw-r--r--  arch/arm64/kernel/vdso.c | 6
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 3
-rw-r--r--  arch/arm64/kernel/vdso/vdso.lds.S | 7
43 files changed, 1162 insertions(+), 453 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 1add7b01efa7..2f361a883d8c 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -45,6 +45,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
obj-$(CONFIG_COMPAT) += sigreturn32.o
+obj-$(CONFIG_COMPAT_ALIGNMENT_FIXUPS) += compat_alignment.o
obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o
@@ -85,8 +86,8 @@ $(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
$(obj)/vdso32-wrap.o: $(obj)/vdso32/vdso.so
obj-y += probes/
-head-y := head.o
-extra-y += $(head-y) vmlinux.lds
+obj-y += head.o
+extra-y += vmlinux.lds
ifeq ($(CONFIG_DEBUG_EFI),y)
AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index bfeeb5319abf..b1990e38aed0 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -99,7 +99,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
* that read this address need to convert this address to the
* Boot-Loader's endianness before jumping.
*/
- writeq_relaxed(__pa_symbol(function_nocfi(secondary_entry)),
+ writeq_relaxed(__pa_symbol(secondary_entry),
&mailbox->entry_point);
writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 9bcaa5eacf16..91263d09ea65 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -10,17 +10,23 @@
#include <linux/init.h>
#include <linux/cpu.h>
+#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
+#include <asm/module.h>
#include <asm/sections.h>
+#include <asm/vdso.h>
#include <linux/stop_machine.h>
#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+#define ALT_CAP(a) ((a)->cpufeature & ~ARM64_CB_BIT)
+#define ALT_HAS_CB(a) ((a)->cpufeature & ARM64_CB_BIT)
+
/* Volatile, as we may be patching the guts of READ_ONCE() */
static volatile int all_alternatives_applied;
@@ -133,7 +139,8 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
} while (cur += d_size, cur < end);
}
-static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module,
+static void __apply_alternatives(const struct alt_region *region,
+ bool is_module,
unsigned long *feature_mask)
{
struct alt_instr *alt;
@@ -142,30 +149,27 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
for (alt = region->begin; alt < region->end; alt++) {
int nr_inst;
+ int cap = ALT_CAP(alt);
- if (!test_bit(alt->cpufeature, feature_mask))
+ if (!test_bit(cap, feature_mask))
continue;
- /* Use ARM64_CB_PATCH as an unconditional patch */
- if (alt->cpufeature < ARM64_CB_PATCH &&
- !cpus_have_cap(alt->cpufeature))
+ if (!cpus_have_cap(cap))
continue;
- if (alt->cpufeature == ARM64_CB_PATCH)
+ if (ALT_HAS_CB(alt))
BUG_ON(alt->alt_len != 0);
else
BUG_ON(alt->alt_len != alt->orig_len);
- pr_info_once("patching kernel code\n");
-
origptr = ALT_ORIG_PTR(alt);
updptr = is_module ? origptr : lm_alias(origptr);
nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
- if (alt->cpufeature < ARM64_CB_PATCH)
- alt_cb = patch_alternative;
- else
+ if (ALT_HAS_CB(alt))
alt_cb = ALT_REPL_PTR(alt);
+ else
+ alt_cb = patch_alternative;
alt_cb(alt, origptr, updptr, nr_inst);
@@ -192,30 +196,55 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
}
}
+void apply_alternatives_vdso(void)
+{
+ struct alt_region region;
+ const struct elf64_hdr *hdr;
+ const struct elf64_shdr *shdr;
+ const struct elf64_shdr *alt;
+ DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);
+
+ bitmap_fill(all_capabilities, ARM64_NCAPS);
+
+ hdr = (struct elf64_hdr *)vdso_start;
+ shdr = (void *)hdr + hdr->e_shoff;
+ alt = find_section(hdr, shdr, ".altinstructions");
+ if (!alt)
+ return;
+
+ region = (struct alt_region){
+ .begin = (void *)hdr + alt->sh_offset,
+ .end = (void *)hdr + alt->sh_offset + alt->sh_size,
+ };
+
+ __apply_alternatives(&region, false, &all_capabilities[0]);
+}
+
+static const struct alt_region kernel_alternatives = {
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
+};
+
/*
* We might be patching the stop_machine state machine, so implement a
* really simple polling protocol here.
*/
static int __apply_alternatives_multi_stop(void *unused)
{
- struct alt_region region = {
- .begin = (struct alt_instr *)__alt_instructions,
- .end = (struct alt_instr *)__alt_instructions_end,
- };
-
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
while (!all_alternatives_applied)
cpu_relax();
isb();
} else {
- DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);
+ DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);
bitmap_complement(remaining_capabilities, boot_capabilities,
- ARM64_NPATCHABLE);
+ ARM64_NCAPS);
BUG_ON(all_alternatives_applied);
- __apply_alternatives(&region, false, remaining_capabilities);
+ __apply_alternatives(&kernel_alternatives, false,
+ remaining_capabilities);
/* Barriers provided by the cache flushing */
all_alternatives_applied = 1;
}
@@ -225,6 +254,9 @@ static int __apply_alternatives_multi_stop(void *unused)
void __init apply_alternatives_all(void)
{
+ pr_info("applying system-wide alternatives\n");
+
+ apply_alternatives_vdso();
/* better not try code patching on a live SMP system */
stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
@@ -236,15 +268,13 @@ void __init apply_alternatives_all(void)
*/
void __init apply_boot_alternatives(void)
{
- struct alt_region region = {
- .begin = (struct alt_instr *)__alt_instructions,
- .end = (struct alt_instr *)__alt_instructions_end,
- };
-
/* If called on non-boot cpu things could go wrong */
WARN_ON(smp_processor_id() != 0);
- __apply_alternatives(&region, false, &boot_capabilities[0]);
+ pr_info("applying boot alternatives\n");
+
+ __apply_alternatives(&kernel_alternatives, false,
+ &boot_capabilities[0]);
}
#ifdef CONFIG_MODULES
@@ -254,10 +284,18 @@ void apply_alternatives_module(void *start, size_t length)
.begin = start,
.end = start + length,
};
- DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+ DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);
- bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+ bitmap_fill(all_capabilities, ARM64_NCAPS);
__apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif
+
+noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
+ __le32 *updptr, int nr_inst)
+{
+ for (int i = 0; i < nr_inst; i++)
+ updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
+}
+EXPORT_SYMBOL(alt_cb_patch_nops);
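
Aside (not part of the patch): the rework above folds the old ARM64_CB_PATCH special case into a flag bit carried in the same cpufeature field, so ALT_CAP() recovers the capability number and ALT_HAS_CB() tests whether a callback does the patching. A minimal user-space sketch of that encoding, with the flag position assumed purely for illustration:

#include <stdio.h>

#define ARM64_CB_BIT	(1U << 15)	/* assumed flag position, sketch only */
#define ALT_CAP(f)	((f) & ~ARM64_CB_BIT)
#define ALT_HAS_CB(f)	((f) & ARM64_CB_BIT)

int main(void)
{
	unsigned int plain = 42;			/* capability 42, patched with replacement insns */
	unsigned int with_cb = 42 | ARM64_CB_BIT;	/* capability 42, patched via callback */

	printf("plain:   cap=%u cb=%u\n", ALT_CAP(plain), !!ALT_HAS_CB(plain));
	printf("with_cb: cap=%u cb=%u\n", ALT_CAP(with_cb), !!ALT_HAS_CB(with_cb));
	return 0;
}
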
diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c
new file mode 100644
index 000000000000..5edec2f49ec9
--- /dev/null
+++ b/arch/arm64/kernel/compat_alignment.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// based on arch/arm/mm/alignment.c
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/exception.h>
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+
+/*
+ * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
+ *
+ * Speed optimisations and better fault handling by Russell King.
+ */
+#define CODING_BITS(i) (i & 0x0e000000)
+
+#define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */
+#define LDST_U_BIT(i) (i & (1 << 23)) /* Add offset */
+#define LDST_W_BIT(i) (i & (1 << 21)) /* Writeback */
+#define LDST_L_BIT(i) (i & (1 << 20)) /* Load */
+
+#define LDST_P_EQ_U(i) ((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)
+
+#define LDSTHD_I_BIT(i) (i & (1 << 22)) /* double/half-word immed */
+
+#define RN_BITS(i) ((i >> 16) & 15) /* Rn */
+#define RD_BITS(i) ((i >> 12) & 15) /* Rd */
+#define RM_BITS(i) (i & 15) /* Rm */
+
+#define REGMASK_BITS(i) (i & 0xffff)
+
+#define BAD_INSTR 0xdeadc0de
+
+/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either f800h,e800h,f800h */
+#define IS_T32(hi16) \
+ (((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
+
+union offset_union {
+ unsigned long un;
+ signed long sn;
+};
+
+#define TYPE_ERROR 0
+#define TYPE_FAULT 1
+#define TYPE_LDST 2
+#define TYPE_DONE 3
+
+static void
+do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs,
+ union offset_union offset)
+{
+ if (!LDST_U_BIT(instr))
+ offset.un = -offset.un;
+
+ if (!LDST_P_BIT(instr))
+ addr += offset.un;
+
+ if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
+ regs->regs[RN_BITS(instr)] = addr;
+}
+
+static int
+do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
+{
+ unsigned int rd = RD_BITS(instr);
+ unsigned int rd2;
+ int load;
+
+ if ((instr & 0xfe000000) == 0xe8000000) {
+ /* ARMv7 Thumb-2 32-bit LDRD/STRD */
+ rd2 = (instr >> 8) & 0xf;
+ load = !!(LDST_L_BIT(instr));
+ } else if (((rd & 1) == 1) || (rd == 14)) {
+ return TYPE_ERROR;
+ } else {
+ load = ((instr & 0xf0) == 0xd0);
+ rd2 = rd + 1;
+ }
+
+ if (load) {
+ unsigned int val, val2;
+
+ if (get_user(val, (u32 __user *)addr) ||
+ get_user(val2, (u32 __user *)(addr + 4)))
+ return TYPE_FAULT;
+ regs->regs[rd] = val;
+ regs->regs[rd2] = val2;
+ } else {
+ if (put_user(regs->regs[rd], (u32 __user *)addr) ||
+ put_user(regs->regs[rd2], (u32 __user *)(addr + 4)))
+ return TYPE_FAULT;
+ }
+ return TYPE_LDST;
+}
+
+/*
+ * LDM/STM alignment handler.
+ *
+ * There are 4 variants of this instruction:
+ *
+ * B = rn pointer before instruction, A = rn pointer after instruction
+ * ------ increasing address ----->
+ * | | r0 | r1 | ... | rx | |
+ * PU = 01 B A
+ * PU = 11 B A
+ * PU = 00 A B
+ * PU = 10 A B
+ */
+static int
+do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
+{
+ unsigned int rd, rn, nr_regs, regbits;
+ unsigned long eaddr, newaddr;
+ unsigned int val;
+
+ /* count the number of registers in the mask to be transferred */
+ nr_regs = hweight16(REGMASK_BITS(instr)) * 4;
+
+ rn = RN_BITS(instr);
+ newaddr = eaddr = regs->regs[rn];
+
+ if (!LDST_U_BIT(instr))
+ nr_regs = -nr_regs;
+ newaddr += nr_regs;
+ if (!LDST_U_BIT(instr))
+ eaddr = newaddr;
+
+ if (LDST_P_EQ_U(instr)) /* U = P */
+ eaddr += 4;
+
+ for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
+ regbits >>= 1, rd += 1)
+ if (regbits & 1) {
+ if (LDST_L_BIT(instr)) {
+ if (get_user(val, (u32 __user *)eaddr))
+ return TYPE_FAULT;
+ if (rd < 15)
+ regs->regs[rd] = val;
+ else
+ regs->pc = val;
+ } else {
+ /*
+ * The PC register has a bias of +8 in ARM mode
+ * and +4 in Thumb mode. This means that a read
+ * of the value of PC should account for this.
+ * Since Thumb does not permit STM instructions
+ * to refer to PC, just add 8 here.
+ */
+ val = (rd < 15) ? regs->regs[rd] : regs->pc + 8;
+ if (put_user(val, (u32 __user *)eaddr))
+ return TYPE_FAULT;
+ }
+ eaddr += 4;
+ }
+
+ if (LDST_W_BIT(instr))
+ regs->regs[rn] = newaddr;
+
+ return TYPE_DONE;
+}
+
+/*
+ * Convert Thumb multi-word load/store instruction forms to equivalent ARM
+ * instructions so we can reuse ARM userland alignment fault fixups for Thumb.
+ *
+ * This implementation was initially based on the algorithm found in
+ * gdb/sim/arm/thumbemu.c. It is basically just a code reduction of same
+ * to convert only Thumb ld/st instruction forms to equivalent ARM forms.
+ *
+ * NOTES:
+ * 1. Comments below refer to ARM ARM DDI0100E Thumb Instruction sections.
+ * 2. If for some reason we're passed a non-ld/st Thumb instruction to
+ * decode, we return 0xdeadc0de. This should never happen under normal
+ * circumstances but if it does, we've got other problems to deal with
+ * elsewhere and we obviously can't fix those problems here.
+ */
+
+static unsigned long thumb2arm(u16 tinstr)
+{
+ u32 L = (tinstr & (1<<11)) >> 11;
+
+ switch ((tinstr & 0xf800) >> 11) {
+ /* 6.6.1 Format 1: */
+ case 0xc000 >> 11: /* 7.1.51 STMIA */
+ case 0xc800 >> 11: /* 7.1.25 LDMIA */
+ {
+ u32 Rn = (tinstr & (7<<8)) >> 8;
+ u32 W = ((L<<Rn) & (tinstr&255)) ? 0 : 1<<21;
+
+ return 0xe8800000 | W | (L<<20) | (Rn<<16) |
+ (tinstr&255);
+ }
+
+ /* 6.6.1 Format 2: */
+ case 0xb000 >> 11: /* 7.1.48 PUSH */
+ case 0xb800 >> 11: /* 7.1.47 POP */
+ if ((tinstr & (3 << 9)) == 0x0400) {
+ static const u32 subset[4] = {
+ 0xe92d0000, /* STMDB sp!,{registers} */
+ 0xe92d4000, /* STMDB sp!,{registers,lr} */
+ 0xe8bd0000, /* LDMIA sp!,{registers} */
+ 0xe8bd8000 /* LDMIA sp!,{registers,pc} */
+ };
+ return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
+ (tinstr & 255); /* register_list */
+ }
+ fallthrough; /* for illegal instruction case */
+
+ default:
+ return BAD_INSTR;
+ }
+}
+
+/*
+ * Convert Thumb-2 32 bit LDM, STM, LDRD, STRD to equivalent instruction
+ * handlable by ARM alignment handler, also find the corresponding handler,
+ * so that we can reuse ARM userland alignment fault fixups for Thumb.
+ *
+ * @pinstr: original Thumb-2 instruction; returns new handlable instruction
+ * @regs: register context.
+ * @poffset: return offset from faulted addr for later writeback
+ *
+ * NOTES:
+ * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
+ * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
+ */
+static void *
+do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
+ union offset_union *poffset)
+{
+ u32 instr = *pinstr;
+ u16 tinst1 = (instr >> 16) & 0xffff;
+ u16 tinst2 = instr & 0xffff;
+
+ switch (tinst1 & 0xffe0) {
+ /* A6.3.5 Load/Store multiple */
+ case 0xe880: /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
+ case 0xe8a0: /* ...above writeback version */
+ case 0xe900: /* STMDB/STMFD, LDMDB/LDMEA */
+ case 0xe920: /* ...above writeback version */
+ /* no need offset decision since handler calculates it */
+ return do_alignment_ldmstm;
+
+ case 0xf840: /* POP/PUSH T3 (single register) */
+ if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
+ u32 L = !!(LDST_L_BIT(instr));
+ const u32 subset[2] = {
+ 0xe92d0000, /* STMDB sp!,{registers} */
+ 0xe8bd0000, /* LDMIA sp!,{registers} */
+ };
+ *pinstr = subset[L] | (1<<RD_BITS(instr));
+ return do_alignment_ldmstm;
+ }
+ /* Else fall through for illegal instruction case */
+ break;
+
+ /* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
+ case 0xe860:
+ case 0xe960:
+ case 0xe8e0:
+ case 0xe9e0:
+ poffset->un = (tinst2 & 0xff) << 2;
+ fallthrough;
+
+ case 0xe940:
+ case 0xe9c0:
+ return do_alignment_ldrdstrd;
+
+ /*
+ * No need to handle load/store instructions up to word size
+ * since ARMv6 and later CPUs can perform unaligned accesses.
+ */
+ default:
+ break;
+ }
+ return NULL;
+}
+
+static int alignment_get_arm(struct pt_regs *regs, __le32 __user *ip, u32 *inst)
+{
+ __le32 instr = 0;
+ int fault;
+
+ fault = get_user(instr, ip);
+ if (fault)
+ return fault;
+
+ *inst = __le32_to_cpu(instr);
+ return 0;
+}
+
+static int alignment_get_thumb(struct pt_regs *regs, __le16 __user *ip, u16 *inst)
+{
+ __le16 instr = 0;
+ int fault;
+
+ fault = get_user(instr, ip);
+ if (fault)
+ return fault;
+
+ *inst = __le16_to_cpu(instr);
+ return 0;
+}
+
+int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
+{
+ union offset_union offset;
+ unsigned long instrptr;
+ int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
+ unsigned int type;
+ u32 instr = 0;
+ u16 tinstr = 0;
+ int isize = 4;
+ int thumb2_32b = 0;
+ int fault;
+
+ instrptr = instruction_pointer(regs);
+
+ if (compat_thumb_mode(regs)) {
+ __le16 __user *ptr = (__le16 __user *)(instrptr & ~1);
+
+ fault = alignment_get_thumb(regs, ptr, &tinstr);
+ if (!fault) {
+ if (IS_T32(tinstr)) {
+ /* Thumb-2 32-bit */
+ u16 tinst2;
+ fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
+ instr = ((u32)tinstr << 16) | tinst2;
+ thumb2_32b = 1;
+ } else {
+ isize = 2;
+ instr = thumb2arm(tinstr);
+ }
+ }
+ } else {
+ fault = alignment_get_arm(regs, (__le32 __user *)instrptr, &instr);
+ }
+
+ if (fault)
+ return 1;
+
+ switch (CODING_BITS(instr)) {
+ case 0x00000000: /* 3.13.4 load/store instruction extensions */
+ if (LDSTHD_I_BIT(instr))
+ offset.un = (instr & 0xf00) >> 4 | (instr & 15);
+ else
+ offset.un = regs->regs[RM_BITS(instr)];
+
+ if ((instr & 0x001000f0) == 0x000000d0 || /* LDRD */
+ (instr & 0x001000f0) == 0x000000f0) /* STRD */
+ handler = do_alignment_ldrdstrd;
+ else
+ return 1;
+ break;
+
+ case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */
+ if (thumb2_32b) {
+ offset.un = 0;
+ handler = do_alignment_t32_to_handler(&instr, regs, &offset);
+ } else {
+ offset.un = 0;
+ handler = do_alignment_ldmstm;
+ }
+ break;
+
+ default:
+ return 1;
+ }
+
+ type = handler(addr, instr, regs);
+
+ if (type == TYPE_ERROR || type == TYPE_FAULT)
+ return 1;
+
+ if (type == TYPE_LDST)
+ do_alignment_finish_ldst(addr, instr, regs, offset);
+
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->pc);
+ arm64_skip_faulting_instruction(regs, isize);
+
+ return 0;
+}
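
Aside (not part of the patch): the new handler reuses the 32-bit ARM fixup paths by first rewriting 16-bit Thumb encodings into equivalent ARM ones, as thumb2arm() does above for PUSH/POP. A small stand-alone sketch of that specific conversion (the test value is just an example):

#include <stdint.h>
#include <stdio.h>

static uint32_t thumb_pushpop_to_arm(uint16_t tinstr)
{
	/* assumes tinstr really is a Thumb PUSH/POP (1011 x10x xxxx xxxx) */
	uint32_t L = (tinstr >> 11) & 1;	/* 0 = PUSH -> STMDB, 1 = POP -> LDMIA */
	uint32_t R = (tinstr >> 8) & 1;		/* extra LR (PUSH) or PC (POP) in the list */
	static const uint32_t subset[4] = {
		0xe92d0000,	/* STMDB sp!,{registers}    */
		0xe92d4000,	/* STMDB sp!,{registers,lr} */
		0xe8bd0000,	/* LDMIA sp!,{registers}    */
		0xe8bd8000,	/* LDMIA sp!,{registers,pc} */
	};

	return subset[(L << 1) | R] | (tinstr & 0xff);	/* low 8 bits: register_list */
}

int main(void)
{
	/* PUSH {r0-r3,lr} == 0xb50f -> STMDB sp!,{r0-r3,lr} == 0xe92d400f */
	printf("0x%08x\n", (unsigned int)thumb_pushpop_to_arm(0xb50f));
	return 0;
}
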
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 48a8af97faa9..6b752fe89745 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
@@ -28,7 +29,7 @@
* branch to what would be the reset vector. It must be executed with the
* flat identity mapping.
*/
-SYM_CODE_START(cpu_soft_restart)
+SYM_TYPED_FUNC_START(cpu_soft_restart)
mov_q x12, INIT_SCTLR_EL1_MMU_OFF
pre_disable_mmu_workaround
/*
@@ -47,6 +48,6 @@ SYM_CODE_START(cpu_soft_restart)
mov x1, x3 // arg1
mov x2, x4 // arg2
br x8
-SYM_CODE_END(cpu_soft_restart)
+SYM_FUNC_END(cpu_soft_restart)
.popsection
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 53b973b6059f..89ac00084f38 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -121,6 +121,22 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
+static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
+static void __maybe_unused
+cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
+{
+ struct arm64_ftr_reg *regp;
+
+ regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
+ if (!regp)
+ return;
+
+ raw_spin_lock(&reg_user_mask_modification);
+ if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
+ regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
+ raw_spin_unlock(&reg_user_mask_modification);
+}
+
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \
.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@@ -214,6 +230,11 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2441007
+ {
+ ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ },
+#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
{
/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
@@ -692,6 +713,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2658417
+ {
+ .desc = "ARM erratum 2658417",
+ .capability = ARM64_WORKAROUND_2658417,
+ /* Cortex-A510 r0p0 - r1p1 */
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
+ MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
+ .cpu_enable = cpu_clear_bf16_from_user_emulation,
+ },
+#endif
{
}
};
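
Aside (not part of the patch): the erratum 2658417 workaround above hides BF16 from ID-register emulation by clearing its field from the register's user-visible mask, under a spinlock because several CPUs may apply the fixup. A minimal sketch of that masking step (the field position is assumed here for illustration):

#include <stdint.h>
#include <stdio.h>

/* assumed field position of ID_AA64ISAR1_EL1.BF16 (a 4-bit field) -- sketch only */
#define ID_AA64ISAR1_EL1_BF16_MASK	(0xfULL << 44)

struct ftr_reg {
	uint64_t user_mask;	/* which ID fields userspace is allowed to see */
};

static void clear_bf16_from_user_mask(struct ftr_reg *regp)
{
	/* the kernel holds reg_user_mask_modification around this update */
	regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
}

int main(void)
{
	struct ftr_reg reg = { .user_mask = ~0ULL };

	clear_bf16_from_user_mask(&reg);
	printf("user_mask = 0x%016llx\n", (unsigned long long)reg.user_mask);
	return 0;
}
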
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index af4de817d712..b3f37e2209ad 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -108,8 +108,7 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
-/* Need also bit for ARM64_CB_PATCH */
-DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
@@ -134,31 +133,12 @@ DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
*/
static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
-/*
- * Flag to indicate if we have computed the system wide
- * capabilities based on the boot time active CPUs. This
- * will be used to determine if a new booting CPU should
- * go through the verification process to make sure that it
- * supports the system capabilities, without using a hotplug
- * notifier. This is also used to decide if we could use
- * the fast path for checking constant CPU caps.
- */
-DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
-EXPORT_SYMBOL(arm64_const_caps_ready);
-static inline void finalize_system_capabilities(void)
-{
- static_branch_enable(&arm64_const_caps_ready);
-}
-
void dump_cpu_features(void)
{
/* file-wide pr_fmt adds "CPU features: " prefix */
pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
}
-DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
-EXPORT_SYMBOL(cpu_hwcap_keys);
-
#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
{ \
.sign = SIGNED, \
@@ -243,35 +223,35 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
- S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
- S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, ID_AA64PFR0_EL1_AdvSIMD_NI),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SME_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RAS_frac_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_NI),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -316,9 +296,9 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_EXS_SHIFT, 4, 0),
/*
* Page size not being supported at Stage-2 is not fatal. You
* just give up KVM if PAGE_SIZE isn't supported there. Go fix
@@ -334,9 +314,9 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
* fields are inconsistent across vCPUs, then it isn't worth
* trying to bring KVM up.
*/
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT, 4, 1),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT, 4, 1),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT, 4, 1),
/*
* We already refuse to boot CPUs that don't support our configured
* page size, so we can only detect mismatches for a page size other
@@ -344,55 +324,55 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
* exist in the wild so, even though we don't like it, we'll have to go
* along with it and treat them as non-strict.
*/
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN4_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN4_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN64_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN64_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN16_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN16_NI),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0),
/* Linux shouldn't care about secure memory */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT, 4, 0),
/*
* Differing PARange is fine as long as all peripherals and memory are mapped
* within the minimum PARange of all CPUs
*/
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_PARANGE_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TIDCP1_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1_SpecSEI_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_PAN_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_LO_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HPDS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VH_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VMIDBits_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_E0PD_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_EVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_BBM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_TTL_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_FWB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IDS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_AT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_VARange_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CnP_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -434,17 +414,41 @@ static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0),
/*
* We can instantiate multiple PMU instances with different levels
* of support.
*/
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_mvfr0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPROUND_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSHVEC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSQRT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDIVIDE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPTRAP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_SIMD_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_mvfr1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDFMAC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPHP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDHP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDSP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDINT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDLS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPDNAN_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPFTZ_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -478,10 +482,10 @@ static const struct arm64_ftr_bits ftr_id_isar0[] = {
static const struct arm64_ftr_bits ftr_id_isar5[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -594,7 +598,7 @@ static const struct arm64_ftr_bits ftr_smcr[] = {
* Common ftr bits for a 32bit register with all hidden, strict
* attributes, with 4bit feature fields and a default safe value of
* 0. Covers the following 32bit registers:
- * id_isar[1-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
+ * id_isar[1-3], id_mmfr[1-3]
*/
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
@@ -665,8 +669,8 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),
/* Op1 = 0, CRn = 0, CRm = 3 */
- ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
- ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
+ ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_mvfr0),
+ ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_mvfr1),
ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2),
ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1),
@@ -750,7 +754,7 @@ static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
* returns - Upon success, matching ftr_reg entry for id.
 * - NULL on failure but with a WARN_ON().
*/
-static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
+struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
struct arm64_ftr_reg *reg;
@@ -1392,6 +1396,12 @@ u64 __read_sysreg_by_encoding(u32 sys_id)
#include <linux/irqchip/arm-gic-v3.h>
static bool
+has_always(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ return true;
+}
+
+static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
int val = cpuid_feature_extract_field_width(reg, entry->field_pos,
@@ -1401,17 +1411,40 @@ feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
return val >= entry->min_field_value;
}
-static bool
-has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
+static u64
+read_scoped_sysreg(const struct arm64_cpu_capabilities *entry, int scope)
{
- u64 val;
-
WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
- val = read_sanitised_ftr_reg(entry->sys_reg);
+ return read_sanitised_ftr_reg(entry->sys_reg);
else
- val = __read_sysreg_by_encoding(entry->sys_reg);
+ return __read_sysreg_by_encoding(entry->sys_reg);
+}
+
+static bool
+has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ int mask;
+ struct arm64_ftr_reg *regp;
+ u64 val = read_scoped_sysreg(entry, scope);
+
+ regp = get_arm64_ftr_reg(entry->sys_reg);
+ if (!regp)
+ return false;
+
+ mask = cpuid_feature_extract_unsigned_field_width(regp->user_mask,
+ entry->field_pos,
+ entry->field_width);
+ if (!mask)
+ return false;
+
+ return feature_matches(val, entry);
+}
+static bool
+has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ u64 val = read_scoped_sysreg(entry, scope);
return feature_matches(val, entry);
}
@@ -1492,7 +1525,7 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
return cpuid_feature_extract_signed_field(pfr0,
- ID_AA64PFR0_FP_SHIFT) < 0;
+ ID_AA64PFR0_EL1_FP_SHIFT) < 0;
}
static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
@@ -1571,7 +1604,7 @@ bool kaslr_requires_kpti(void)
if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
if (cpuid_feature_extract_unsigned_field(mmfr2,
- ID_AA64MMFR2_E0PD_SHIFT))
+ ID_AA64MMFR2_EL1_E0PD_SHIFT))
return false;
}
@@ -1685,7 +1718,7 @@ static phys_addr_t kpti_ng_pgd_alloc(int shift)
return kpti_ng_temp_alloc;
}
-static void __nocfi
+static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
@@ -1713,7 +1746,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
if (arm64_use_ng_mappings)
return;
- remap_fn = (void *)__pa_symbol(function_nocfi(idmap_kpti_install_ng_mappings));
+ remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
if (!cpu) {
alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
@@ -2034,7 +2067,8 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
{
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
- isb();
+
+ mte_cpu_setup();
/*
* Clear the tags in the zero page. This needs to be done via the
@@ -2088,12 +2122,22 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
static const struct arm64_cpu_capabilities arm64_features[] = {
{
+ .capability = ARM64_ALWAYS_BOOT,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+ .matches = has_always,
+ },
+ {
+ .capability = ARM64_ALWAYS_SYSTEM,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_always,
+ },
+ {
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_useable_gicv3_cpuif,
.sys_reg = SYS_ID_AA64PFR0_EL1,
- .field_pos = ID_AA64PFR0_GIC_SHIFT,
+ .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@@ -2104,7 +2148,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR0_EL1,
- .field_pos = ID_AA64MMFR0_ECV_SHIFT,
+ .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@@ -2116,7 +2160,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
- .field_pos = ID_AA64MMFR1_PAN_SHIFT,
+ .field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@@ -2130,7 +2174,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
- .field_pos = ID_AA64MMFR1_PAN_SHIFT,
+ .field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 3,
@@ -2168,9 +2212,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_32bit_el0,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_EL0_SHIFT,
+ .field_pos = ID_AA64PFR0_EL1_EL0_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
+ .min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
},
#ifdef CONFIG_KVM
{
@@ -2180,9 +2224,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_EL1_SHIFT,
+ .field_pos = ID_AA64PFR0_EL1_EL1_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
+ .min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
},
{
.desc = "Protected KVM",
@@ -2201,7 +2245,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
* more details.
*/
.sys_reg = SYS_ID_AA64PFR0_EL1,
- .field_pos = ID_AA64PFR0_CSV3_SHIFT,
+ .field_pos = ID_AA64PFR0_EL1_CSV3_SHIFT,
.field_width = 4,
.min_field_value = 1,
.matches = unmap_kernel_at_el0,
@@ -2244,9 +2288,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_SVE,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_SVE_SHIFT,
+ .field_pos = ID_AA64PFR0_EL1_SVE_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64PFR0_SVE,
+ .min_field_value = ID_AA64PFR0_EL1_SVE_IMP,
.matches = has_cpuid_feature,
.cpu_enable = sve_kernel_enable,
},
@@ -2259,9 +2303,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_RAS_SHIFT,
+ .field_pos = ID_AA64PFR0_EL1_RAS_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64PFR0_RAS_V1,
+ .min_field_value = ID_AA64PFR0_EL1_RAS_IMP,
.cpu_enable = cpu_clear_disr,
},
#endif /* CONFIG_ARM64_RAS_EXTN */
@@ -2278,9 +2322,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_amu,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_AMU_SHIFT,
+ .field_pos = ID_AA64PFR0_EL1_AMU_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64PFR0_AMU,
+ .min_field_value = ID_AA64PFR0_EL1_AMU_IMP,
.cpu_enable = cpu_amu_enable,
},
#endif /* CONFIG_ARM64_AMU_EXTN */
@@ -2303,7 +2347,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_STAGE2_FWB,
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR2_FWB_SHIFT,
+ .field_pos = ID_AA64MMFR2_EL1_FWB_SHIFT,
.field_width = 4,
.min_field_value = 1,
.matches = has_cpuid_feature,
@@ -2314,7 +2358,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_ARMv8_4_TTL,
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR2_TTL_SHIFT,
+ .field_pos = ID_AA64MMFR2_EL1_TTL_SHIFT,
.field_width = 4,
.min_field_value = 1,
.matches = has_cpuid_feature,
@@ -2344,7 +2388,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HW_DBM,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR1_HADBS_SHIFT,
+ .field_pos = ID_AA64MMFR1_EL1_HAFDBS_SHIFT,
.field_width = 4,
.min_field_value = 2,
.matches = has_hw_dbm,
@@ -2367,10 +2411,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR1_EL1,
- .field_pos = ID_AA64PFR1_SSBS_SHIFT,
+ .field_pos = ID_AA64PFR1_EL1_SSBS_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
- .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
+ .min_field_value = ID_AA64PFR1_EL1_SSBS_IMP,
},
#ifdef CONFIG_ARM64_CNP
{
@@ -2380,7 +2424,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_useable_cnp,
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR2_CNP_SHIFT,
+ .field_pos = ID_AA64MMFR2_EL1_CnP_SHIFT,
.field_width = 4,
.min_field_value = 1,
.cpu_enable = cpu_enable_cnp,
@@ -2485,7 +2529,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = can_use_gic_priorities,
.sys_reg = SYS_ID_AA64PFR0_EL1,
- .field_pos = ID_AA64PFR0_GIC_SHIFT,
+ .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@@ -2499,7 +2543,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.sign = FTR_UNSIGNED,
.field_width = 4,
- .field_pos = ID_AA64MMFR2_E0PD_SHIFT,
+ .field_pos = ID_AA64MMFR2_EL1_E0PD_SHIFT,
.matches = has_cpuid_feature,
.min_field_value = 1,
.cpu_enable = cpu_enable_e0pd,
@@ -2528,9 +2572,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
.cpu_enable = bti_enable,
.sys_reg = SYS_ID_AA64PFR1_EL1,
- .field_pos = ID_AA64PFR1_BT_SHIFT,
+ .field_pos = ID_AA64PFR1_EL1_BT_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64PFR1_BT_BTI,
+ .min_field_value = ID_AA64PFR1_EL1_BT_IMP,
.sign = FTR_UNSIGNED,
},
#endif
@@ -2541,9 +2585,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR1_EL1,
- .field_pos = ID_AA64PFR1_MTE_SHIFT,
+ .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64PFR1_MTE,
+ .min_field_value = ID_AA64PFR1_EL1_MTE_MTE2,
.sign = FTR_UNSIGNED,
.cpu_enable = cpu_enable_mte,
},
@@ -2553,9 +2597,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR1_EL1,
- .field_pos = ID_AA64PFR1_MTE_SHIFT,
+ .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64PFR1_MTE_ASYMM,
+ .min_field_value = ID_AA64PFR1_EL1_MTE_MTE3,
.sign = FTR_UNSIGNED,
},
#endif /* CONFIG_ARM64_MTE */
@@ -2577,9 +2621,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_SME,
.sys_reg = SYS_ID_AA64PFR1_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR1_SME_SHIFT,
+ .field_pos = ID_AA64PFR1_EL1_SME_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64PFR1_SME,
+ .min_field_value = ID_AA64PFR1_EL1_SME_IMP,
.matches = has_cpuid_feature,
.cpu_enable = sme_kernel_enable,
},
@@ -2614,9 +2658,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR1_TIDCP1_SHIFT,
+ .field_pos = ID_AA64MMFR1_EL1_TIDCP1_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64MMFR1_TIDCP1_IMP,
+ .min_field_value = ID_AA64MMFR1_EL1_TIDCP1_IMP,
.matches = has_cpuid_feature,
.cpu_enable = cpu_trap_el0_impdef,
},
@@ -2624,7 +2668,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
};
#define HWCAP_CPUID_MATCH(reg, field, width, s, min_value) \
- .matches = has_cpuid_feature, \
+ .matches = has_user_cpuid_feature, \
.sys_reg = reg, \
.field_pos = field, \
.field_width = width, \
@@ -2708,11 +2752,11 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
@@ -2725,38 +2769,39 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_EBF16),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
- HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
+ HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE_IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BitPerm_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SHA3_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SM4_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_I8MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
#endif
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS),
#ifdef CONFIG_ARM64_BTI
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_IMP, CAP_HWCAP, KERNEL_HWCAP_BTI),
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
#endif
#ifdef CONFIG_ARM64_MTE
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3),
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE),
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3),
#endif /* CONFIG_ARM64_MTE */
- HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
- HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
+ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
+ HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
#ifdef CONFIG_ARM64_SME
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME),
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME_IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
@@ -2929,9 +2974,6 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
if (!cpus_have_cap(num))
continue;
- /* Ensure cpus_have_const_cap(num) works */
- static_branch_enable(&cpu_hwcap_keys[num]);
-
if (boot_scope && caps->cpu_enable)
/*
* Capabilities with SCOPE_BOOT_CPU scope are finalised
@@ -3102,7 +3144,7 @@ static void verify_hyp_capabilities(void)
/* Verify IPA range */
parange = cpuid_feature_extract_unsigned_field(mmfr0,
- ID_AA64MMFR0_PARANGE_SHIFT);
+ ID_AA64MMFR0_EL1_PARANGE_SHIFT);
ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
if (ipa_max < get_kvm_ipa_limit()) {
pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
@@ -3253,9 +3295,6 @@ void __init setup_cpu_features(void)
sme_setup();
minsigstksz_setup();
- /* Advertise that we have computed the system capabilities */
- finalize_system_capabilities();
-
/*
* Check for sane CTR_EL0.CWG value.
*/
@@ -3324,7 +3363,7 @@ static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *c
/*
* We emulate only the following system register space.
- * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
+ * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 2 - 7]
* See Table C5-6 System instruction encodings for System register accesses,
* ARMv8 ARM(ARM DDI 0487A.f) for more details.
*/
@@ -3334,7 +3373,7 @@ static inline bool __attribute_const__ is_emulated(u32 id)
sys_reg_CRn(id) == 0x0 &&
sys_reg_Op1(id) == 0x0 &&
(sys_reg_CRm(id) == 0 ||
- ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
+ ((sys_reg_CRm(id) >= 2) && (sys_reg_CRm(id) <= 7))));
}
/*
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index d7702f39b4d3..28d4f442b0bc 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -115,6 +115,7 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_SME_FA64] = "smefa64",
[KERNEL_HWCAP_WFXT] = "wfxt",
[KERNEL_HWCAP_EBF16] = "ebf16",
+ [KERNEL_HWCAP_SVE_EBF16] = "sveebf16",
};
#ifdef CONFIG_COMPAT
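
The new "sveebf16" string makes the SVE extended BFloat16 capability visible in /proc/cpuinfo. Userspace can also test the matching hwcap bit via the auxiliary vector; a minimal sketch, assuming the HWCAP2_SVE_EBF16 bit that accompanies KERNEL_HWCAP_SVE_EBF16 in this series is exported in the uapi headers:

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>	/* HWCAP2_SVE_EBF16 -- assumed to be added by this series */

    int main(void)
    {
    	unsigned long hwcap2 = getauxval(AT_HWCAP2);

    	/* Same information as the "sveebf16" entry in /proc/cpuinfo */
    	if (hwcap2 & HWCAP2_SVE_EBF16)
    		puts("SVE EBF16: supported");
    	else
    		puts("SVE EBF16: not supported");
    	return 0;
    }
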
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index bf9fe71589bc..3da09778267e 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -28,7 +28,7 @@
u8 debug_monitors_arch(void)
{
return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
- ID_AA64DFR0_DEBUGVER_SHIFT);
+ ID_AA64DFR0_EL1_DebugVer_SHIFT);
}
/*
diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
index 75691a2641c1..67babd5f04c2 100644
--- a/arch/arm64/kernel/efi-rt-wrapper.S
+++ b/arch/arm64/kernel/efi-rt-wrapper.S
@@ -6,7 +6,7 @@
#include <linux/linkage.h>
SYM_FUNC_START(__efi_rt_asm_wrapper)
- stp x29, x30, [sp, #-32]!
+ stp x29, x30, [sp, #-112]!
mov x29, sp
/*
@@ -17,6 +17,20 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
stp x1, x18, [sp, #16]
/*
+ * Preserve all callee saved registers and record the stack pointer
+ * value in a per-CPU variable so we can recover from synchronous
+ * exceptions occurring while running the firmware routines.
+ */
+ stp x19, x20, [sp, #32]
+ stp x21, x22, [sp, #48]
+ stp x23, x24, [sp, #64]
+ stp x25, x26, [sp, #80]
+ stp x27, x28, [sp, #96]
+
+ adr_this_cpu x8, __efi_rt_asm_recover_sp, x9
+ str x29, [x8]
+
+ /*
* We are lucky enough that no EFI runtime services take more than
* 5 arguments, so all are passed in registers rather than via the
* stack.
@@ -31,7 +45,7 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
ldp x1, x2, [sp, #16]
cmp x2, x18
- ldp x29, x30, [sp], #32
+ ldp x29, x30, [sp], #112
b.ne 0f
ret
0:
@@ -45,3 +59,18 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
mov x18, x2
b efi_handle_corrupted_x18 // tail call
SYM_FUNC_END(__efi_rt_asm_wrapper)
+
+SYM_FUNC_START(__efi_rt_asm_recover)
+ ldr_this_cpu x8, __efi_rt_asm_recover_sp, x9
+ mov sp, x8
+
+ ldp x0, x18, [sp, #16]
+ ldp x19, x20, [sp, #32]
+ ldp x21, x22, [sp, #48]
+ ldp x23, x24, [sp, #64]
+ ldp x25, x26, [sp, #80]
+ ldp x27, x28, [sp, #96]
+ ldp x29, x30, [sp], #112
+
+ b efi_handle_runtime_exception
+SYM_FUNC_END(__efi_rt_asm_recover)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e1be6c429810..ee53f2a0aa03 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -9,9 +9,18 @@
#include <linux/efi.h>
#include <linux/init.h>
+#include <linux/percpu.h>
#include <asm/efi.h>
+static bool region_is_misaligned(const efi_memory_desc_t *md)
+{
+ if (PAGE_SIZE == EFI_PAGE_SIZE)
+ return false;
+ return !PAGE_ALIGNED(md->phys_addr) ||
+ !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
+}
+
/*
* Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
* executable, everything else can be mapped with the XN bits
@@ -25,14 +34,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
if (type == EFI_MEMORY_MAPPED_IO)
return PROT_DEVICE_nGnRE;
- if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
- "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
+ if (region_is_misaligned(md)) {
+ static bool __initdata code_is_misaligned;
+
/*
- * If the region is not aligned to the page size of the OS, we
- * can not use strict permissions, since that would also affect
- * the mapping attributes of the adjacent regions.
+ * Regions that are not aligned to the OS page size cannot be
+ * mapped with strict permissions, as those might interfere
+ * with the permissions that are needed by the adjacent
+ * region's mapping. However, if we haven't encountered any
+ * misaligned runtime code regions so far, we can safely use
+ * non-executable permissions for non-code regions.
*/
- return pgprot_val(PAGE_KERNEL_EXEC);
+ code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);
+
+ return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
+ : pgprot_val(PAGE_KERNEL);
+ }
/* R-- */
if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
@@ -63,19 +80,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_RUNTIME_SERVICES_DATA);
- if (!PAGE_ALIGNED(md->phys_addr) ||
- !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
- /*
- * If the end address of this region is not aligned to page
- * size, the mapping is rounded up, and may end up sharing a
- * page frame with the next UEFI memory region. If we create
- * a block entry now, we may need to split it again when mapping
- * the next region, and support for that is going to be removed
- * from the MMU routines. So avoid block mappings altogether in
- * that case.
- */
+ /*
+ * If this region is not aligned to the page size used by the OS, the
+ * mapping will be rounded outwards, and may end up sharing a page
+ * frame with an adjacent runtime memory region. Given that the page
+ * table descriptor covering the shared page will be rewritten when the
+ * adjacent region gets mapped, we must avoid block mappings here so we
+ * don't have to worry about splitting them when that happens.
+ */
+ if (region_is_misaligned(md))
page_mappings_only = true;
- }
create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
@@ -102,6 +116,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
md->type != EFI_RUNTIME_SERVICES_DATA);
+ if (region_is_misaligned(md))
+ return 0;
+
/*
* Calling apply_to_page_range() is only safe on regions that are
* guaranteed to be mapped down to pages. Since we are only called
@@ -128,3 +145,28 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
return s;
}
+
+asmlinkage DEFINE_PER_CPU(u64, __efi_rt_asm_recover_sp);
+
+asmlinkage efi_status_t __efi_rt_asm_recover(void);
+
+asmlinkage efi_status_t efi_handle_runtime_exception(const char *f)
+{
+ pr_err(FW_BUG "Synchronous exception occurred in EFI runtime service %s()\n", f);
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return EFI_ABORTED;
+}
+
+bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
+{
+ /* Check whether the exception occurred while running the firmware */
+ if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64)
+ return false;
+
+ pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ dump_stack();
+
+ regs->pc = (u64)__efi_rt_asm_recover;
+ return true;
+}
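
efi_runtime_fixup_exception() gives the fault-handling code a way to abort a misbehaving EFI runtime service instead of bringing the kernel down: the callee-saved registers and stack pointer preserved by the wrapper let __efi_rt_asm_recover unwind out of the firmware call and return EFI_ABORTED. The caller is not part of this diff; a hedged sketch of how a synchronous-exception path might use it (the enclosing function is hypothetical):

    /*
     * Illustrative sketch only: the surrounding handler is made up, and only
     * efi_runtime_fixup_exception() comes from the code above.
     */
    static void hypothetical_kernel_fault(const char *msg, struct pt_regs *regs,
    				      unsigned long esr)
    {
    	/*
    	 * If the fault hit while running EFI runtime services, bail out of
    	 * the firmware call instead of dying.
    	 */
    	if (efi_runtime_fixup_exception(regs, msg))
    		return;		/* regs->pc now points at __efi_rt_asm_recover */

    	die(msg, regs, esr);
    }
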
diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
index 98d67444a5b6..27ef7ad3ffd2 100644
--- a/arch/arm64/kernel/elfcore.c
+++ b/arch/arm64/kernel/elfcore.c
@@ -8,9 +8,9 @@
#include <asm/cpufeature.h>
#include <asm/mte.h>
-#define for_each_mte_vma(tsk, vma) \
+#define for_each_mte_vma(vmi, vma) \
if (system_supports_mte()) \
- for (vma = tsk->mm->mmap; vma; vma = vma->vm_next) \
+ for_each_vma(vmi, vma) \
if (vma->vm_flags & VM_MTE)
static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
@@ -81,8 +81,9 @@ Elf_Half elf_core_extra_phdrs(void)
{
struct vm_area_struct *vma;
int vma_count = 0;
+ VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(current, vma)
+ for_each_mte_vma(vmi, vma)
vma_count++;
return vma_count;
@@ -91,8 +92,9 @@ Elf_Half elf_core_extra_phdrs(void)
int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(current, vma) {
+ for_each_mte_vma(vmi, vma) {
struct elf_phdr phdr;
phdr.p_type = PT_AARCH64_MEMTAG_MTE;
@@ -116,8 +118,9 @@ size_t elf_core_extra_data_size(void)
{
struct vm_area_struct *vma;
size_t data_size = 0;
+ VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(current, vma)
+ for_each_mte_vma(vmi, vma)
data_size += mte_vma_tag_dump_size(vma);
return data_size;
@@ -126,8 +129,9 @@ size_t elf_core_extra_data_size(void)
int elf_core_write_extra_data(struct coredump_params *cprm)
{
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(current, vma) {
+ for_each_mte_vma(vmi, vma) {
if (vma->vm_flags & VM_DONTDUMP)
continue;
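
The for_each_mte_vma() rework swaps the old vma->vm_next linked-list walk for the maple-tree based VMA iterator. Independent of MTE, the underlying pattern looks roughly like this (a minimal sketch of the iterator API used above):

    #include <linux/mm.h>
    #include <linux/mm_types.h>

    /* Count the VMAs in @mm that have @flag set (sketch). */
    static int count_vmas_with_flag(struct mm_struct *mm, vm_flags_t flag)
    {
    	struct vm_area_struct *vma;
    	int count = 0;
    	VMA_ITERATOR(vmi, mm, 0);	/* iterate from address 0 upwards */

    	mmap_read_lock(mm);
    	for_each_vma(vmi, vma)
    		if (vma->vm_flags & flag)
    			count++;
    	mmap_read_unlock(mm);

    	return count;
    }
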
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index c75ca36b4a49..27369fa1c032 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -329,7 +329,8 @@ static void cortex_a76_erratum_1463225_svc_handler(void)
__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}
-static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+static __always_inline bool
+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
return false;
@@ -379,11 +380,20 @@ static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
exit_to_kernel_mode(regs);
}
-static void noinstr el1_undef(struct pt_regs *regs)
+static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_kernel_mode(regs);
+ local_daif_inherit(regs);
+ do_undefinstr(regs, esr);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
- do_undefinstr(regs);
+ do_el1_bti(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
@@ -402,7 +412,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
- do_ptrauth_fault(regs, esr);
+ do_el1_fpac(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
@@ -425,7 +435,10 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
break;
case ESR_ELx_EC_SYS64:
case ESR_ELx_EC_UNKNOWN:
- el1_undef(regs);
+ el1_undef(regs, esr);
+ break;
+ case ESR_ELx_EC_BTI:
+ el1_bti(regs, esr);
break;
case ESR_ELx_EC_BREAKPT_CUR:
case ESR_ELx_EC_SOFTSTP_CUR:
@@ -582,11 +595,11 @@ static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
exit_to_user_mode(regs);
}
-static void noinstr el0_undef(struct pt_regs *regs)
+static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
- do_undefinstr(regs);
+ do_undefinstr(regs, esr);
exit_to_user_mode(regs);
}
@@ -594,7 +607,7 @@ static void noinstr el0_bti(struct pt_regs *regs)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
- do_bti(regs);
+ do_el0_bti(regs);
exit_to_user_mode(regs);
}
@@ -629,7 +642,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
- do_ptrauth_fault(regs, esr);
+ do_el0_fpac(regs, esr);
exit_to_user_mode(regs);
}
@@ -670,7 +683,7 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
el0_pc(regs, esr);
break;
case ESR_ELx_EC_UNKNOWN:
- el0_undef(regs);
+ el0_undef(regs, esr);
break;
case ESR_ELx_EC_BTI:
el0_bti(regs);
@@ -788,7 +801,7 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_CP14_MR:
case ESR_ELx_EC_CP14_LS:
case ESR_ELx_EC_CP14_64:
- el0_undef(regs);
+ el0_undef(regs, esr);
break;
case ESR_ELx_EC_CP15_32:
case ESR_ELx_EC_CP15_64:
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index bd5df50e4643..795344ab4ec4 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -7,6 +7,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
@@ -294,10 +295,14 @@ SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-SYM_FUNC_START(ftrace_stub)
+SYM_TYPED_FUNC_START(ftrace_stub)
ret
SYM_FUNC_END(ftrace_stub)
+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+ ret
+SYM_FUNC_END(ftrace_stub_graph)
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* void return_to_handler(void)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2d73b3e793b2..e28137d64b76 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -114,7 +114,7 @@
* them if required.
*/
.macro apply_ssbd, state, tmp1, tmp2
-alternative_cb spectre_v4_patch_fw_mitigation_enable
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
b .L__asm_ssbd_skip\@ // Patched to NOP
alternative_cb_end
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
@@ -123,7 +123,7 @@ alternative_cb_end
tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
-alternative_cb smccc_patch_fw_mitigation_conduit
+alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
@@ -175,7 +175,7 @@ alternative_else_nop_endif
.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
-alternative_cb kasan_hw_tags_enable
+alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
b 1f
alternative_cb_end
mov \tmp, KERNEL_GCR_EL1
@@ -186,7 +186,7 @@ alternative_cb_end
.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
-alternative_cb kasan_hw_tags_enable
+alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
b 1f
alternative_cb_end
ldr \tmp, [\tsk, #THREAD_MTE_CTRL]
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index ea5dc7c90f46..8745175f4a75 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -56,7 +56,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
unsigned long pc;
u32 new;
- pc = (unsigned long)function_nocfi(ftrace_call);
+ pc = (unsigned long)ftrace_call;
new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
AARCH64_INSN_BRANCH_LINK);
@@ -217,11 +217,26 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip;
u32 old = 0, new;
+ new = aarch64_insn_gen_nop();
+
+ /*
+	 * When using mcount, callsites in modules may have been initialized to
+ * call an arbitrary module PLT (which redirects to the _mcount stub)
+ * rather than the ftrace PLT we'll use at runtime (which redirects to
+ * the ftrace trampoline). We can ignore the old PLT when initializing
+ * the callsite.
+ *
+ * Note: 'mod' is only set at module load time.
+ */
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
+ IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
+ return aarch64_insn_patch_text_nosync((void *)pc, new);
+ }
+
if (!ftrace_find_callable_addr(rec, mod, &addr))
return -EINVAL;
old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
- new = aarch64_insn_gen_nop();
return ftrace_modify_code(pc, old, new, true);
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 814b6587ccb7..2196aad7b55b 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -99,7 +99,7 @@ SYM_CODE_START(primary_entry)
*/
#if VA_BITS > 48
mrs_s x0, SYS_ID_AA64MMFR2_EL1
- tst x0, #0xf << ID_AA64MMFR2_LVA_SHIFT
+ tst x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
mov x0, #VA_BITS
mov x25, #VA_BITS_MIN
csel x25, x25, x0, eq
@@ -658,10 +658,10 @@ SYM_FUNC_END(__secondary_too_slow)
*/
SYM_FUNC_START(__enable_mmu)
mrs x3, ID_AA64MMFR0_EL1
- ubfx x3, x3, #ID_AA64MMFR0_TGRAN_SHIFT, 4
- cmp x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+ ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
+ cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
b.lt __no_granule_support
- cmp x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+ cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
b.gt __no_granule_support
phys_to_ttbr x2, x2
msr ttbr0_el1, x2 // load TTBR0
@@ -679,7 +679,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
b.ne 2f
mrs_s x0, SYS_ID_AA64MMFR2_EL1
- and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ and x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
cbnz x0, 2f
update_early_cpu_boot_status \
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 12c7fad02ae5..2ee18c860f2a 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -98,7 +98,7 @@ SYM_CODE_START_LOCAL(elx_sync)
SYM_CODE_END(elx_sync)
SYM_CODE_START_LOCAL(__finalise_el2)
- check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve
+ check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve
.Linit_sve: /* SVE register access */
mrs x0, cptr_el2 // Disable SVE traps
@@ -109,7 +109,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
msr_s SYS_ZCR_EL2, x1 // length for EL1.
.Lskip_sve:
- check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme
+ check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme
.Linit_sme: /* SME register access and priority mapping */
mrs x0, cptr_el2 // Disable SME traps
@@ -142,7 +142,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal
mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present?
- ubfx x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
+ ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
cbz x1, .Lskip_sme
mrs_s x1, SYS_HCRX_EL2
@@ -157,7 +157,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
tbnz x1, #0, 1f
// Needs to be VHE capable, obviously
- check_override id_aa64mmfr1 ID_AA64MMFR1_VHE_SHIFT 2f 1f
+ check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f
1: mov_q x0, HVC_STUB_ERR
eret
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 1b0542c69738..95133765ed29 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -50,7 +50,7 @@ static const struct ftr_set_desc mmfr1 __initconst = {
.name = "id_aa64mmfr1",
.override = &id_aa64mmfr1_override,
.fields = {
- FIELD("vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter),
+ FIELD("vh", ID_AA64MMFR1_EL1_VH_SHIFT, mmfr1_vh_filter),
{}
},
};
@@ -74,7 +74,7 @@ static const struct ftr_set_desc pfr0 __initconst = {
.name = "id_aa64pfr0",
.override = &id_aa64pfr0_override,
.fields = {
- FIELD("sve", ID_AA64PFR0_SVE_SHIFT, pfr0_sve_filter),
+ FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter),
{}
},
};
@@ -98,9 +98,9 @@ static const struct ftr_set_desc pfr1 __initconst = {
.name = "id_aa64pfr1",
.override = &id_aa64pfr1_override,
.fields = {
- FIELD("bt", ID_AA64PFR1_BT_SHIFT, NULL ),
- FIELD("mte", ID_AA64PFR1_MTE_SHIFT, NULL),
- FIELD("sme", ID_AA64PFR1_SME_SHIFT, pfr1_sme_filter),
+ FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ),
+ FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL),
+ FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter),
{}
},
};
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index afa69e04e75e..8151412653de 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -24,9 +24,6 @@ PROVIDE(__efistub_primary_entry_offset = primary_entry - _text);
*/
PROVIDE(__efistub_memcmp = __pi_memcmp);
PROVIDE(__efistub_memchr = __pi_memchr);
-PROVIDE(__efistub_memcpy = __pi_memcpy);
-PROVIDE(__efistub_memmove = __pi_memmove);
-PROVIDE(__efistub_memset = __pi_memset);
PROVIDE(__efistub_strlen = __pi_strlen);
PROVIDE(__efistub_strnlen = __pi_strnlen);
PROVIDE(__efistub_strcmp = __pi_strcmp);
@@ -40,16 +37,6 @@ PROVIDE(__efistub__edata = _edata);
PROVIDE(__efistub_screen_info = screen_info);
PROVIDE(__efistub__ctype = _ctype);
-/*
- * The __ prefixed memcpy/memset/memmove symbols are provided by KASAN, which
- * instruments the conventional ones. Therefore, any references from the EFI
- * stub or other position independent, low level C code should be redirected to
- * the non-instrumented versions as well.
- */
-PROVIDE(__efistub___memcpy = __pi_memcpy);
-PROVIDE(__efistub___memmove = __pi_memmove);
-PROVIDE(__efistub___memset = __pi_memset);
-
PROVIDE(__pi___memcpy = __pi_memcpy);
PROVIDE(__pi___memmove = __pi_memmove);
PROVIDE(__pi___memset = __pi_memset);
@@ -73,6 +60,7 @@ KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
+KVM_NVHE_ALIAS(alt_cb_patch_nops);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_vgic_global_state);
@@ -89,10 +77,6 @@ KVM_NVHE_ALIAS(__icache_flags);
/* VMID bits set by the KVM VMID allocator */
KVM_NVHE_ALIAS(kvm_arm_vmid_bits);
-/* Kernel symbols needed for cpus_have_final/const_caps checks. */
-KVM_NVHE_ALIAS(arm64_const_caps_ready);
-KVM_NVHE_ALIAS(cpu_hwcap_keys);
-
/* Static keys which are set if a vGIC trap should be handled in hyp. */
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index bda49430c9ea..38dbd3828f13 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -21,7 +21,9 @@
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <asm/daifflags.h>
+#include <asm/exception.h>
#include <asm/vmap_stack.h>
+#include <asm/softirq_stack.h>
/* Only access this in an NMI enter/exit */
DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
@@ -71,6 +73,18 @@ static void init_irq_stacks(void)
}
#endif
+#ifndef CONFIG_PREEMPT_RT
+static void ____do_softirq(struct pt_regs *regs)
+{
+ __do_softirq();
+}
+
+void do_softirq_own_stack(void)
+{
+ call_on_irq_stack(NULL, ____do_softirq);
+}
+#endif
+
static void default_handle_irq(struct pt_regs *regs)
{
panic("IRQ taken without a root IRQ handler\n");
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 19c2d487cb08..ce3d40120f72 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -204,7 +204,7 @@ void machine_kexec(struct kimage *kimage)
typeof(cpu_soft_restart) *restart;
cpu_install_idmap();
- restart = (void *)__pa_symbol(function_nocfi(cpu_soft_restart));
+ restart = (void *)__pa_symbol(cpu_soft_restart);
restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem,
0, 0);
} else {
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index a3d0494f25a9..5a0a8f552a61 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -37,7 +37,8 @@ struct plt_entry get_plt_entry(u64 dst, void *pc)
return plt;
}
-bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b)
+static bool plt_entries_equal(const struct plt_entry *a,
+ const struct plt_entry *b)
{
u64 p, q;
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f2d4bb14bfab..76b41e4ca9fa 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -476,21 +476,6 @@ overflow:
return -ENOEXEC;
}
-static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- const char *name)
-{
- const Elf_Shdr *s, *se;
- const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-
- for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
- if (strcmp(name, secstrs + s->sh_name) == 0)
- return s;
- }
-
- return NULL;
-}
-
static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
*plt = get_plt_entry(addr, plt);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index b2b730233274..7467217c1eaf 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -48,7 +48,12 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
if (!pte_is_tagged)
return;
- mte_clear_page_tags(page_address(page));
+ /*
+ * Test PG_mte_tagged again in case it was racing with another
+ * set_pte_at().
+ */
+ if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+ mte_clear_page_tags(page_address(page));
}
void mte_sync_tags(pte_t old_pte, pte_t pte)
@@ -64,7 +69,7 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
- if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+ if (!test_bit(PG_mte_tagged, &page->flags))
mte_sync_page_tags(page, old_pte, check_swap,
pte_is_tagged);
}
@@ -285,6 +290,49 @@ void mte_thread_switch(struct task_struct *next)
mte_check_tfsr_el1();
}
+void mte_cpu_setup(void)
+{
+ u64 rgsr;
+
+ /*
+ * CnP must be enabled only after the MAIR_EL1 register has been set
+ * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
+ * lead to the wrong memory type being used for a brief window during
+ * CPU power-up.
+ *
+ * CnP is not a boot feature so MTE gets enabled before CnP, but let's
+ * make sure that is the case.
+ */
+ BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
+ BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
+
+ /* Normal Tagged memory type at the corresponding MAIR index */
+ sysreg_clear_set(mair_el1,
+ MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
+ MT_NORMAL_TAGGED));
+
+ write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);
+
+ /*
+ * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
+ * RGSR_EL1.SEED must be non-zero for IRG to produce
+ * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
+ * must initialize it.
+ */
+ rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
+ SYS_RGSR_EL1_SEED_SHIFT;
+ if (rgsr == 0)
+ rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
+ write_sysreg_s(rgsr, SYS_RGSR_EL1);
+
+ /* clear any pending tag check faults in TFSR*_EL1 */
+ write_sysreg_s(0, SYS_TFSR_EL1);
+ write_sysreg_s(0, SYS_TFSRE0_EL1);
+
+ local_flush_tlb_all();
+}
+
void mte_suspend_enter(void)
{
if (!system_supports_mte())
@@ -301,6 +349,14 @@ void mte_suspend_enter(void)
mte_check_tfsr_el1();
}
+void mte_suspend_exit(void)
+{
+ if (!system_supports_mte())
+ return;
+
+ mte_cpu_setup();
+}
+
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cb69ff1e6138..7b0643fe2f13 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
*/
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
- return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
+ return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5);
}
static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
@@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info)
dfr0 = read_sysreg(id_aa64dfr0_el1);
pmuver = cpuid_feature_extract_unsigned_field(dfr0,
- ID_AA64DFR0_PMUVER_SHIFT);
- if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0)
+ ID_AA64DFR0_EL1_PMUVer_SHIFT);
+ if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || pmuver == 0)
return;
cpu_pmu->pmuver = pmuver;
@@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info)
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
/* store PMMIR_EL1 register for sysfs */
- if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31)))
+ if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
else
cpu_pmu->reg_pmmir = 0;
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index f6f58e6265df..b4eece3eb17d 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -9,9 +9,27 @@
#include <asm/perf_regs.h>
#include <asm/ptrace.h>
+static u64 perf_ext_regs_value(int idx)
+{
+ switch (idx) {
+ case PERF_REG_ARM64_VG:
+ if (WARN_ON_ONCE(!system_supports_sve()))
+ return 0;
+
+ /*
+ * Vector granule is current length in bits of SVE registers
+ * divided by 64.
+ */
+ return (task_get_sve_vl(current) * 8) / 64;
+ default:
+ WARN_ON_ONCE(true);
+ return 0;
+ }
+}
+
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
- if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX))
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_EXTENDED_MAX))
return 0;
/*
@@ -51,6 +69,9 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
if ((u32)idx == PERF_REG_ARM64_PC)
return regs->pc;
+ if ((u32)idx >= PERF_REG_ARM64_MAX)
+ return perf_ext_regs_value(idx);
+
return regs->regs[idx];
}
@@ -58,7 +79,12 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
int perf_reg_validate(u64 mask)
{
- if (!mask || mask & REG_RESERVED)
+ u64 reserved_mask = REG_RESERVED;
+
+ if (system_supports_sve())
+ reserved_mask &= ~(1ULL << PERF_REG_ARM64_VG);
+
+ if (!mask || mask & reserved_mask)
return -EINVAL;
return 0;
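
Exposing PERF_REG_ARM64_VG means a profiler can sample the SVE vector granule (the current vector length in 64-bit units) alongside the ordinary registers. A rough sketch of the perf_event_open() attribute setup, with error handling omitted and an arbitrary event choice:

    #include <linux/perf_event.h>
    #include <asm/perf_regs.h>	/* PERF_REG_ARM64_PC, PERF_REG_ARM64_VG */
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_cycles_with_vg(void)
    {
    	struct perf_event_attr attr;

    	memset(&attr, 0, sizeof(attr));
    	attr.size = sizeof(attr);
    	attr.type = PERF_TYPE_HARDWARE;
    	attr.config = PERF_COUNT_HW_CPU_CYCLES;
    	attr.sample_period = 100000;
    	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER;
    	/* request PC plus the new vector-granule pseudo register */
    	attr.sample_regs_user = (1ULL << PERF_REG_ARM64_PC) |
    				(1ULL << PERF_REG_ARM64_VG);

    	/* current thread, any CPU */
    	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
    }
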
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d1d182320245..c9e4d0720285 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -44,13 +44,28 @@ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
kprobe_opcode_t *addr = p->ainsn.api.insn;
- void *addrs[] = {addr, addr + 1};
- u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS};
- /* prepare insn slot */
- aarch64_insn_patch_text(addrs, insns, 2);
-
- flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE));
+ /*
+	 * Prepare the insn slot. As Mark Rutland points out, this depends on a
+	 * couple of subtleties:
+	 *
+	 * - That the I-cache maintenance for these instructions is complete
+	 *   *before* the kprobe BRK is written (and aarch64_insn_patch_text_nosync()
+	 *   ensures this, but just omits causing a Context-Synchronization-Event
+	 *   on all CPUs).
+	 *
+	 * - That the kprobe BRK results in an exception (and consequently a
+	 *   Context-Synchronization-Event), which ensures that the CPU will
+	 *   fetch the single-step slot instructions *after* this, ensuring that
+	 *   the new instructions are used.
+	 *
+	 * Strictly, an ISB would be needed after patching to guarantee that the
+	 * I-cache maintenance is observed on all CPUs; however, the single-step
+	 * slot is only installed from the BRK exception handler, so it is
+	 * unnecessary to generate a Context-Synchronization-Event via ISB again.
+ */
+ aarch64_insn_patch_text_nosync(addr, p->opcode);
+ aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS);
/*
* Needs restoring of return address after stepping xol.
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 92bcc1768f0b..044a7d7f1f6a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -279,10 +279,6 @@ void flush_thread(void)
flush_tagged_addr_state();
}
-void release_thread(struct task_struct *dead_task)
-{
-}
-
void arch_release_task_struct(struct task_struct *tsk)
{
fpsimd_release_task(tsk);
@@ -595,7 +591,7 @@ unsigned long __get_wchan(struct task_struct *p)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= prandom_u32_max(PAGE_SIZE);
return sp & ~0xf;
}
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index 40be3a7c2c53..bfce41c2a53b 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -168,7 +168,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
/* If the CPU has CSV2 set, we're safe */
pfr0 = read_cpuid(ID_AA64PFR0_EL1);
- if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
return SPECTRE_UNAFFECTED;
/* Alternatively, we have a list of unaffected CPUs */
@@ -586,7 +586,7 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
if (spectre_v4_mitigations_off())
return;
- if (cpus_have_final_cap(ARM64_SSBS))
+ if (cpus_have_cap(ARM64_SSBS))
return;
if (spectre_v4_mitigations_dynamic())
@@ -868,6 +868,10 @@ u8 spectre_bhb_loop_affected(int scope)
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
{},
};
+ static const struct midr_range spectre_bhb_k11_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+ {},
+ };
static const struct midr_range spectre_bhb_k8_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -878,6 +882,8 @@ u8 spectre_bhb_loop_affected(int scope)
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;
+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+ k = 11;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;
@@ -945,7 +951,7 @@ static bool supports_ecbhb(int scope)
mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1,
- ID_AA64MMFR1_ECBHB_SHIFT);
+ ID_AA64MMFR1_EL1_ECBHB_SHIFT);
}
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
@@ -988,6 +994,14 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
isb();
}
+static bool __read_mostly __nospectre_bhb;
+static int __init parse_spectre_bhb_param(char *str)
+{
+ __nospectre_bhb = true;
+ return 0;
+}
+early_param("nospectre_bhb", parse_spectre_bhb_param);
+
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
bp_hardening_cb_t cpu_cb;
@@ -1001,7 +1015,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
/* No point mitigating Spectre-BHB alone. */
} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
- } else if (cpu_mitigations_off()) {
+ } else if (cpu_mitigations_off() || __nospectre_bhb) {
pr_info_once("spectre-bhb mitigation disabled by command line option\n");
} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
state = SPECTRE_MITIGATED;
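
The new early parameter makes it possible to turn off just the Spectre-BHB mitigation without reaching for the global mitigations=off switch; as the hunk above shows, it is honoured at the same point as the cpu_mitigations_off() check. Example kernel command line (the other tokens are placeholders):

    console=ttyAMA0 root=/dev/vda1 ro nospectre_bhb
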
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index ab7f4c476104..29a8e444db83 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -38,7 +38,7 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
static int cpu_psci_cpu_boot(unsigned int cpu)
{
- phys_addr_t pa_secondary_entry = __pa_symbol(function_nocfi(secondary_entry));
+ phys_addr_t pa_secondary_entry = __pa_symbol(secondary_entry);
int err = psci_ops.cpu_on(cpu_logical_map(cpu), pa_secondary_entry);
if (err)
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 041d2ae5c30a..c2fb5755bbec 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -121,7 +121,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
- on_irq_stack(addr, sizeof(unsigned long), NULL);
+ on_irq_stack(addr, sizeof(unsigned long));
}
/**
@@ -666,10 +666,18 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
static int tls_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
+ int ret;
+
if (target == current)
tls_preserve_current_state();
- return membuf_store(&to, target->thread.uw.tp_value);
+ ret = membuf_store(&to, target->thread.uw.tp_value);
+ if (system_supports_tpidr2())
+ ret = membuf_store(&to, target->thread.tpidr2_el0);
+ else
+ ret = membuf_zero(&to, sizeof(u64));
+
+ return ret;
}
static int tls_set(struct task_struct *target, const struct user_regset *regset,
@@ -677,13 +685,20 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- unsigned long tls = target->thread.uw.tp_value;
+ unsigned long tls[2];
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ tls[0] = target->thread.uw.tp_value;
+ if (system_supports_sme())
+ tls[1] = target->thread.tpidr2_el0;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
if (ret)
return ret;
- target->thread.uw.tp_value = tls;
+ target->thread.uw.tp_value = tls[0];
+ if (system_supports_sme())
+ target->thread.tpidr2_el0 = tls[1];
+
return ret;
}
@@ -1390,7 +1405,7 @@ static const struct user_regset aarch64_regsets[] = {
},
[REGSET_TLS] = {
.core_note_type = NT_ARM_TLS,
- .n = 1,
+ .n = 2,
.size = sizeof(void *),
.align = sizeof(void *),
.regset_get = tls_get,
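
With .n = 2, the NT_ARM_TLS regset now returns TPIDR_EL0 followed by TPIDR2_EL0 (the second slot is zero-filled when TPIDR2 is not supported, as tls_get() above shows). A debugger-side sketch, assuming the tracee is already attached and stopped:

    #include <elf.h>		/* NT_ARM_TLS */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Read both TLS registers of a stopped tracee (sketch, no error handling). */
    static void dump_tls_regs(pid_t pid)
    {
    	uint64_t tls[2] = { 0, 0 };
    	struct iovec iov = {
    		.iov_base = tls,
    		.iov_len  = sizeof(tls),	/* TPIDR_EL0 + TPIDR2_EL0 */
    	};

    	ptrace(PTRACE_GETREGSET, pid, NT_ARM_TLS, &iov);

    	printf("TPIDR_EL0  = %#llx\n", (unsigned long long)tls[0]);
    	/* older kernels shrink iov_len to a single register */
    	if (iov.iov_len >= sizeof(tls))
    		printf("TPIDR2_EL0 = %#llx\n", (unsigned long long)tls[1]);
    }
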
diff --git a/arch/arm64/kernel/reloc_test_core.c b/arch/arm64/kernel/reloc_test_core.c
index e87a2b7f20f6..99f2ffe9fc05 100644
--- a/arch/arm64/kernel/reloc_test_core.c
+++ b/arch/arm64/kernel/reloc_test_core.c
@@ -48,7 +48,7 @@ static struct {
{ "R_AARCH64_PREL16", relative_data16, (u64)&sym64_rel },
};
-static int reloc_test_init(void)
+static int __init reloc_test_init(void)
{
int i;
@@ -67,7 +67,7 @@ static int reloc_test_init(void)
return 0;
}
-static void reloc_test_exit(void)
+static void __exit reloc_test_exit(void)
{
}
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index d20620a1c51a..d56e170e1ca7 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -162,38 +162,6 @@ static int init_sdei_scs(void)
return err;
}
-static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
- struct stack_info *info)
-{
- unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
- unsigned long high = low + SDEI_STACK_SIZE;
-
- return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
-}
-
-static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
- struct stack_info *info)
-{
- unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
- unsigned long high = low + SDEI_STACK_SIZE;
-
- return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
-}
-
-bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
-{
- if (!IS_ENABLED(CONFIG_VMAP_STACK))
- return false;
-
- if (on_sdei_critical_stack(sp, size, info))
- return true;
-
- if (on_sdei_normal_stack(sp, size, info))
- return true;
-
- return false;
-}
-
unsigned long sdei_arch_get_entry_point(int conduit)
{
/*
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 7e1624ecab3c..49029eace3ad 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -66,7 +66,7 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
__le64 __iomem *release_addr;
- phys_addr_t pa_holding_pen = __pa_symbol(function_nocfi(secondary_holding_pen));
+ phys_addr_t pa_holding_pen = __pa_symbol(secondary_holding_pen);
if (!cpu_release_addr[cpu])
return -ENODEV;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ce190ee18a20..634279b3b03d 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -68,31 +68,6 @@ static inline void unwind_init_from_task(struct unwind_state *state,
}
/*
- * We can only safely access per-cpu stacks from current in a non-preemptible
- * context.
- */
-static bool on_accessible_stack(const struct task_struct *tsk,
- unsigned long sp, unsigned long size,
- struct stack_info *info)
-{
- if (info)
- info->type = STACK_TYPE_UNKNOWN;
-
- if (on_task_stack(tsk, sp, size, info))
- return true;
- if (tsk != current || preemptible())
- return false;
- if (on_irq_stack(sp, size, info))
- return true;
- if (on_overflow_stack(sp, size, info))
- return true;
- if (on_sdei_stack(sp, size, info))
- return true;
-
- return false;
-}
-
-/*
* Unwind from one frame record (A) to the next frame record (B).
*
* We terminate early if the location of B indicates a malformed chain of frame
@@ -103,14 +78,13 @@ static int notrace unwind_next(struct unwind_state *state)
{
struct task_struct *tsk = state->task;
unsigned long fp = state->fp;
- struct stack_info info;
int err;
/* Final frame; nothing to unwind */
if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
return -ENOENT;
- err = unwind_next_common(state, &info, on_accessible_stack, NULL);
+ err = unwind_next_frame_record(state);
if (err)
return err;
@@ -190,11 +164,47 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
barrier();
}
+/*
+ * Per-cpu stacks are only accessible when unwinding the current task in a
+ * non-preemptible context.
+ */
+#define STACKINFO_CPU(name) \
+ ({ \
+ ((task == current) && !preemptible()) \
+ ? stackinfo_get_##name() \
+ : stackinfo_get_unknown(); \
+ })
+
+/*
+ * SDEI stacks are only accessible when unwinding the current task in an NMI
+ * context.
+ */
+#define STACKINFO_SDEI(name) \
+ ({ \
+ ((task == current) && in_nmi()) \
+ ? stackinfo_get_sdei_##name() \
+ : stackinfo_get_unknown(); \
+ })
+
noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
{
- struct unwind_state state;
+ struct stack_info stacks[] = {
+ stackinfo_get_task(task),
+ STACKINFO_CPU(irq),
+#if defined(CONFIG_VMAP_STACK)
+ STACKINFO_CPU(overflow),
+#endif
+#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
+ STACKINFO_SDEI(normal),
+ STACKINFO_SDEI(critical),
+#endif
+ };
+ struct unwind_state state = {
+ .stacks = stacks,
+ .nr_stacks = ARRAY_SIZE(stacks),
+ };
if (regs) {
if (task != current)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 9135fe0f3df5..8b02d310838f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -43,6 +43,8 @@ void notrace __cpu_suspend_exit(void)
{
unsigned int cpu = smp_processor_id();
+ mte_suspend_exit();
+
/*
* We are resuming from reset with the idmap active in TTBR0_EL1.
* We must uninstall the idmap and restore the expected MMU
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 733451fe7e41..d72e8f23422d 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -67,7 +67,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
*
* The resulting 5 bits of entropy is seen in SP[8:4].
*/
- choose_random_kstack_offset(get_random_int() & 0x1FF);
+ choose_random_kstack_offset(get_random_u16() & 0x1FF);
}
static inline bool has_syscall_work(unsigned long flags)
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index ad2bfc794257..817d788cd866 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -22,46 +22,6 @@
#include <asm/cputype.h>
#include <asm/topology.h>
-void store_cpu_topology(unsigned int cpuid)
-{
- struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
- u64 mpidr;
-
- if (cpuid_topo->package_id != -1)
- goto topology_populated;
-
- mpidr = read_cpuid_mpidr();
-
- /* Uniprocessor systems can rely on default topology values */
- if (mpidr & MPIDR_UP_BITMASK)
- return;
-
- /*
- * This would be the place to create cpu topology based on MPIDR.
- *
- * However, it cannot be trusted to depict the actual topology; some
- * pieces of the architecture enforce an artificial cap on Aff0 values
- * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
- * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
- * having absolutely no relationship to the actual underlying system
- * topology, and cannot be reasonably used as core / package ID.
- *
- * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
- * we still wouldn't be able to obtain a sane core ID. This means we
- * need to entirely ignore MPIDR for any topology deduction.
- */
- cpuid_topo->thread_id = -1;
- cpuid_topo->core_id = cpuid;
- cpuid_topo->package_id = cpu_to_node(cpuid);
-
- pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
- cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
- cpuid_topo->thread_id, mpidr);
-
-topology_populated:
- update_siblings_masks(cpuid);
-}
-
#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
@@ -237,7 +197,7 @@ static void amu_fie_setup(const struct cpumask *cpus)
for_each_cpu(cpu, cpus) {
if (!freq_counters_valid(cpu) ||
freq_inv_set_max_ratio(cpu,
- cpufreq_get_hw_max_freq(cpu) * 1000,
+ cpufreq_get_hw_max_freq(cpu) * 1000ULL,
arch_timer_get_rate()))
return;
}
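
The 1000ULL change in amu_fie_setup() is an overflow fix: cpufreq_get_hw_max_freq() returns kHz in an unsigned int, and multiplying by a plain int 1000 wraps in 32 bits for frequencies above roughly 4.29 GHz before the result reaches freq_inv_set_max_ratio(). A standalone illustration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* a hypothetical maximum frequency in kHz, large enough to overflow */
    	unsigned int khz = 4400000;		/* 4.4 GHz */

    	uint64_t wrong = khz * 1000;	/* 32-bit multiply: wraps, then widened */
    	uint64_t right = khz * 1000ULL;	/* 64-bit multiply: no overflow */

    	printf("khz * 1000    = %llu Hz\n", (unsigned long long)wrong);
    	printf("khz * 1000ULL = %llu Hz\n", (unsigned long long)right);
    	return 0;
    }
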
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index b7fed33981f7..23d281ed7621 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -26,6 +26,7 @@
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>
+#include <linux/cfi.h>
#include <asm/atomic.h>
#include <asm/bug.h>
@@ -180,12 +181,12 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
#define S_SMP " SMP"
-static int __die(const char *str, int err, struct pt_regs *regs)
+static int __die(const char *str, long err, struct pt_regs *regs)
{
static int die_counter;
int ret;
- pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
+ pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n",
str, err, ++die_counter);
/* trap and error numbers are mostly meaningless on ARM */
@@ -206,7 +207,7 @@ static DEFINE_RAW_SPINLOCK(die_lock);
/*
* This function is protected against re-entrancy.
*/
-void die(const char *str, struct pt_regs *regs, int err)
+void die(const char *str, struct pt_regs *regs, long err)
{
int ret;
unsigned long flags;
@@ -485,7 +486,7 @@ void arm64_notify_segfault(unsigned long addr)
force_signal_inject(SIGSEGV, code, addr, 0);
}
-void do_undefinstr(struct pt_regs *regs)
+void do_undefinstr(struct pt_regs *regs, unsigned long esr)
{
/* check for AArch32 breakpoint instructions */
if (!aarch32_break_handler(regs))
@@ -494,28 +495,38 @@ void do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0)
return;
- BUG_ON(!user_mode(regs));
+ if (!user_mode(regs))
+ die("Oops - Undefined instruction", regs, esr);
+
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_undefinstr);
-void do_bti(struct pt_regs *regs)
+void do_el0_bti(struct pt_regs *regs)
{
- BUG_ON(!user_mode(regs));
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
-NOKPROBE_SYMBOL(do_bti);
-void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
+void do_el1_bti(struct pt_regs *regs, unsigned long esr)
+{
+ die("Oops - BTI", regs, esr);
+}
+NOKPROBE_SYMBOL(do_el1_bti);
+
+void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
+{
+ force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
+}
+
+void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
{
/*
- * Unexpected FPAC exception or pointer authentication failure in
- * the kernel: kill the task before it does any more harm.
+ * Unexpected FPAC exception in the kernel: kill the task before it
+ * does any more harm.
*/
- BUG_ON(!user_mode(regs));
- force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
+ die("Oops - FPAC", regs, esr);
}
-NOKPROBE_SYMBOL(do_ptrauth_fault);
+NOKPROBE_SYMBOL(do_el1_fpac)
#define __user_cache_maint(insn, address, res) \
if (address >= TASK_SIZE_MAX) { \
@@ -758,7 +769,7 @@ void do_cp15instr(unsigned long esr, struct pt_regs *regs)
hook_base = cp15_64_hooks;
break;
default:
- do_undefinstr(regs);
+ do_undefinstr(regs, esr);
return;
}
@@ -773,7 +784,7 @@ void do_cp15instr(unsigned long esr, struct pt_regs *regs)
* EL0. Fall back to our usual undefined instruction handler
* so that we handle these consistently.
*/
- do_undefinstr(regs);
+ do_undefinstr(regs, esr);
}
NOKPROBE_SYMBOL(do_cp15instr);
#endif
@@ -793,7 +804,7 @@ void do_sysinstr(unsigned long esr, struct pt_regs *regs)
* back to our usual undefined instruction handler so that we handle
* these consistently.
*/
- do_undefinstr(regs);
+ do_undefinstr(regs, esr);
}
NOKPROBE_SYMBOL(do_sysinstr);
@@ -970,7 +981,7 @@ static int bug_handler(struct pt_regs *regs, unsigned long esr)
{
switch (report_bug(regs->pc, regs)) {
case BUG_TRAP_TYPE_BUG:
- die("Oops - BUG", regs, 0);
+ die("Oops - BUG", regs, esr);
break;
case BUG_TRAP_TYPE_WARN:
@@ -991,6 +1002,38 @@ static struct break_hook bug_break_hook = {
.imm = BUG_BRK_IMM,
};
+#ifdef CONFIG_CFI_CLANG
+static int cfi_handler(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long target;
+ u32 type;
+
+ target = pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TARGET, esr));
+ type = (u32)pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TYPE, esr));
+
+ switch (report_cfi_failure(regs, regs->pc, &target, type)) {
+ case BUG_TRAP_TYPE_BUG:
+ die("Oops - CFI", regs, 0);
+ break;
+
+ case BUG_TRAP_TYPE_WARN:
+ break;
+
+ default:
+ return DBG_HOOK_ERROR;
+ }
+
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook cfi_break_hook = {
+ .fn = cfi_handler,
+ .imm = CFI_BRK_IMM_BASE,
+ .mask = CFI_BRK_IMM_MASK,
+};
+#endif /* CONFIG_CFI_CLANG */
+
static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
{
pr_err("%s generated an invalid instruction at %pS!\n",
@@ -1038,7 +1081,7 @@ static int kasan_handler(struct pt_regs *regs, unsigned long esr)
* This is something that might be fixed at some point in the future.
*/
if (!recover)
- die("Oops - KASAN", regs, 0);
+ die("Oops - KASAN", regs, esr);
/* If thread survives, skip over the brk instruction and continue: */
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
@@ -1052,6 +1095,9 @@ static struct break_hook kasan_break_hook = {
};
#endif
+
+#define esr_comment(esr) ((esr) & ESR_ELx_BRK64_ISS_COMMENT_MASK)
+
/*
* Initial handler for AArch64 BRK exceptions
* This handler only used until debug_traps_init().
@@ -1059,10 +1105,12 @@ static struct break_hook kasan_break_hook = {
int __init early_brk64(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
+#ifdef CONFIG_CFI_CLANG
+ if ((esr_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE)
+ return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
+#endif
#ifdef CONFIG_KASAN_SW_TAGS
- unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
-
- if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
+ if ((esr_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
@@ -1071,6 +1119,9 @@ int __init early_brk64(unsigned long addr, unsigned long esr,
void __init trap_init(void)
{
register_kernel_break_hook(&bug_break_hook);
+#ifdef CONFIG_CFI_CLANG
+ register_kernel_break_hook(&cfi_break_hook);
+#endif
register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
register_kernel_break_hook(&kasan_break_hook);
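
cfi_break_hook is the first user of the break_hook .mask field: one hook claims the whole range of CFI BRK immediates, and the ESR comment bits then tell cfi_handler() which registers hold the branch target and the expected type. A hedged sketch of the same registration pattern for a made-up immediate range (the values are illustrative, not a real allocation):

    /*
     * Illustrative only: mirrors how cfi_break_hook above uses .imm/.mask to
     * cover a range of BRK immediates. The immediate values are invented.
     */
    static int example_brk_handler(struct pt_regs *regs, unsigned long esr)
    {
    	pr_warn("example BRK (comment %#lx) hit at %pS\n",
    		esr_comment(esr), (void *)regs->pc);
    	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
    	return DBG_HOOK_HANDLED;
    }

    static struct break_hook example_break_hook = {
    	.fn	= example_brk_handler,
    	.imm	= 0x7f00,	/* hypothetical base immediate */
    	.mask	= 0x00ff,	/* match comments 0x7f00-0x7fff */
    };

    /* registered from an __init path, e.g. next to trap_init() above:
     *	register_kernel_break_hook(&example_break_hook);
     */
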
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a61fc4f989b3..99ae81ab91a7 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -29,9 +29,6 @@
#include <asm/signal32.h>
#include <asm/vdso.h>
-extern char vdso_start[], vdso_end[];
-extern char vdso32_start[], vdso32_end[];
-
enum vdso_abi {
VDSO_ABI_AA64,
VDSO_ABI_AA32,
@@ -136,10 +133,11 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
unsigned long size = vma->vm_end - vma->vm_start;
if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index bafbf78fab77..619e2dc7ee14 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -40,7 +40,8 @@ ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
# kernel with CONFIG_WERROR enabled.
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) \
$(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) \
- $(CC_FLAGS_LTO) -Wmissing-prototypes -Wmissing-declarations
+ $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) \
+ -Wmissing-prototypes -Wmissing-declarations
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
UBSAN_SANITIZE := n
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index e69fb4aaaf3e..6028f1fe2d1c 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -48,6 +48,13 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
+ . = ALIGN(4);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
.dynamic : { *(.dynamic) } :text :dynamic
.rela.dyn : ALIGN(8) { *(.rela .rela*) }