author      James Morse <james.morse@arm.com>   2021-11-23 18:29:25 +0000
committer   James Morse <james.morse@arm.com>   2022-02-16 13:17:30 +0000
commit      bd09128d16fac3c34b80bd6a29088ac632e8ce09 (patch)
tree        d4018981e47a285a3efac3eb15e2de2074592d61 /arch/arm64
parent      arm64: entry: Add macro for reading symbol addresses from the trampoline (diff)
download    linux-dev-bd09128d16fac3c34b80bd6a29088ac632e8ce09.tar.xz
            linux-dev-bd09128d16fac3c34b80bd6a29088ac632e8ce09.zip
arm64: Add percpu vectors for EL1
The Spectre-BHB workaround adds a firmware call to the vectors. This is
needed on some CPUs, but not others. To prevent the unaffected CPU in a
big/little pair from making the firmware call, create per-cpu vectors.

The per-cpu vectors only apply when returning from EL0.

Systems using KPTI can use the canonical 'full-fat' vectors directly at
EL1; the trampoline exit code will switch to this_cpu_vector on exit to
EL0. Systems not using KPTI should always use this_cpu_vector.

this_cpu_vector will point at a vector in tramp_vecs or
__bp_harden_el1_vectors, depending on whether KPTI is in use.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
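To make that selection concrete, below is a minimal standalone model of the logic described above. It is not the kernel code: apart from SZ_2K, the addresses and the helper name pick_this_cpu_vector are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Placeholders only; the real values come from the fixmap and entry.S. */
#define SZ_2K                 0x800UL
#define TRAMP_VALIAS          0x100000UL   /* stand-in trampoline alias        */
#define BP_HARDEN_EL1_VECTORS 0x200000UL   /* stand-in __bp_harden_el1_vectors */
#define EL1_VECTOR_KPTI       0            /* the only slot added by this patch */

/* Pick the vector base a CPU installs for its EL0 returns. */
static unsigned long pick_this_cpu_vector(bool kpti, unsigned int slot)
{
	if (kpti)	/* kernel unmapped at EL0: use the trampoline alias */
		return TRAMP_VALIAS + SZ_2K * slot;
	/* systems without KPTI use __bp_harden_el1_vectors directly;
	 * the real helper WARNs if the KPTI slot is requested here */
	return BP_HARDEN_EL1_VECTORS + SZ_2K * slot;
}

int main(void)
{
	printf("KPTI:    %#lx\n", pick_this_cpu_vector(true, EL1_VECTOR_KPTI));
	printf("no KPTI: %#lx\n", pick_this_cpu_vector(false, 0));
	return 0;
}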
Diffstat (limited to 'arch/arm64')
-rw-r--r--   arch/arm64/include/asm/vectors.h   29
-rw-r--r--   arch/arm64/kernel/cpufeature.c     11
-rw-r--r--   arch/arm64/kernel/entry.S          12
-rw-r--r--   arch/arm64/kvm/hyp/vhe/switch.c    10
4 files changed, 53 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h
index bac53fad037d..3f76dfd9e074 100644
--- a/arch/arm64/include/asm/vectors.h
+++ b/arch/arm64/include/asm/vectors.h
@@ -5,6 +5,15 @@
#ifndef __ASM_VECTORS_H
#define __ASM_VECTORS_H
+#include <linux/bug.h>
+#include <linux/percpu.h>
+
+#include <asm/fixmap.h>
+
+extern char vectors[];
+extern char tramp_vectors[];
+extern char __bp_harden_el1_vectors[];
+
/*
* Note: the order of this enum corresponds to two arrays in entry.S:
* tramp_vecs and __bp_harden_el1_vectors. By default the canonical
@@ -29,6 +38,24 @@ enum arm64_bp_harden_el1_vectors {
* Remap the kernel before branching to the canonical vectors.
*/
EL1_VECTOR_KPTI,
-};
+};
+
+/* The vectors to use on return from EL0. e.g. to remap the kernel */
+DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
+
+#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_VALIAS 0
+#endif
+
+static inline const char *
+arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
+{
+ if (arm64_kernel_unmapped_at_el0())
+ return (char *)TRAMP_VALIAS + SZ_2K * slot;
+
+ WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
+
+ return __bp_harden_el1_vectors + SZ_2K * slot;
+}
#endif /* __ASM_VECTORS_H */
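For reference, the SZ_2K stride used by arm64_get_bp_hardening_vector above corresponds to one complete AArch64 vector table: 16 entries spaced 128 bytes apart (the .align 7 seen in entry.S). A quick standalone check of that arithmetic; nothing here is taken from the patch itself.

#include <stdio.h>

/* One AArch64 vector table: 16 entries, each 128 bytes (.align 7). */
enum { VECTOR_ENTRIES = 16, VECTOR_ENTRY_BYTES = 128 };

int main(void)
{
	unsigned int table_bytes = VECTOR_ENTRIES * VECTOR_ENTRY_BYTES;

	/* 2048 bytes per table, hence base + SZ_2K * slot to step between them. */
	printf("bytes per vector table: %u\n", table_bytes);
	printf("offset of slot 1:       %#x\n", 1 * table_bytes);
	return 0;
}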
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e5f23dab1c8d..45fed4974c44 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -73,6 +73,8 @@
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/kasan.h>
+#include <linux/percpu.h>
+
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -85,6 +87,7 @@
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
+#include <asm/vectors.h>
#include <asm/virt.h>
/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
@@ -110,6 +113,8 @@ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
+DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
+
/*
* Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
* support it?
@@ -1590,6 +1595,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
int cpu = smp_processor_id();
+ if (__this_cpu_read(this_cpu_vector) == vectors) {
+ const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
+
+ __this_cpu_write(this_cpu_vector, v);
+ }
+
/*
* We don't need to rewrite the page-tables if either we've done
* it already or we have KASLR enabled and therefore have not
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 8da732fefd8f..a62fee121138 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -38,7 +38,6 @@
.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
.align 7
.Lventry_start\@:
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
.if \el == 0
/*
* This must be the first instruction of the EL0 vector entries. It is
@@ -53,7 +52,6 @@
.endif
.Lskip_tramp_vectors_cleanup\@:
.endif
-#endif
sub sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
@@ -712,10 +710,10 @@ alternative_else_nop_endif
.endm
.macro tramp_exit, regsize = 64
- adr x30, tramp_vectors
-#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
- add x30, x30, SZ_4K
-#endif
+ tramp_data_read_var x30, this_cpu_vector
+ get_this_cpu_offset x29
+ ldr x30, [x30, x29]
+
msr vbar_el1, x30
ldr lr, [sp, #S_LR]
tramp_unmap_kernel x29
@@ -775,6 +773,8 @@ __entry_tramp_data_vectors:
__entry_tramp_data___sdei_asm_handler:
.quad __sdei_asm_handler
#endif /* CONFIG_ARM_SDE_INTERFACE */
+__entry_tramp_data_this_cpu_vector:
+ .quad this_cpu_vector
SYM_DATA_END(__entry_tramp_data_start)
.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
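In rough C terms, the new tramp_exit sequence above reads the address of this_cpu_vector through the trampoline data page, adds this CPU's per-cpu offset, loads the pointer and writes it to VBAR_EL1. Below is a standalone model of that per-CPU lookup; the array merely stands in for the per-cpu variable, and none of this is the real exit path.

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for the this_cpu_vector per-cpu variable: one pointer per CPU. */
static const char *this_cpu_vector[NR_CPUS];

static void tramp_exit_model(int cpu)
{
	/* tramp_data_read_var x30, this_cpu_vector; get_this_cpu_offset x29;
	 * ldr x30, [x30, x29] */
	const char *v = this_cpu_vector[cpu];

	/* msr vbar_el1, x30 */
	printf("cpu%d: vbar_el1 <- %s\n", cpu, v);
}

int main(void)
{
	this_cpu_vector[0] = "vector table chosen for cpu0";
	this_cpu_vector[1] = "vector table chosen for cpu1";
	tramp_exit_model(0);
	tramp_exit_model(1);
	return 0;
}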
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 11d053fdd604..54af47005e45 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -10,6 +10,7 @@
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
+#include <linux/percpu.h>
#include <uapi/linux/psci.h>
#include <kvm/arm_psci.h>
@@ -24,6 +25,8 @@
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/vectors.h>
/* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
@@ -67,7 +70,7 @@ NOKPROBE_SYMBOL(__activate_traps);
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
- extern char vectors[]; /* kernel exception vectors */
+ const char *host_vectors = vectors;
___deactivate_traps(vcpu);
@@ -81,7 +84,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
- write_sysreg(vectors, vbar_el1);
+
+ if (!arm64_kernel_unmapped_at_el0())
+ host_vectors = __this_cpu_read(this_cpu_vector);
+ write_sysreg(host_vectors, vbar_el1);
}
NOKPROBE_SYMBOL(__deactivate_traps);