Diffstat (limited to 'arch/arm64/mm/proc.S')
 arch/arm64/mm/proc.S | 104 ++++++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 64 insertions(+), 40 deletions(-)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index aafed6902411..197a9ba2d5ea 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -11,11 +11,13 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
+#include <asm/smp.h>
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K
@@ -131,45 +133,19 @@ alternative_endif
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
+ reset_amuserenr_el0 x0 // Disable AMU access from EL0
alternative_if ARM64_HAS_RAS_EXTN
msr_s SYS_DISR_EL1, xzr
alternative_else_nop_endif
+ ptrauth_keys_install_kernel x14, 0, x1, x2, x3
isb
ret
SYM_FUNC_END(cpu_do_resume)
.popsection
#endif
-/*
- * cpu_do_switch_mm(pgd_phys, tsk)
- *
- * Set the translation table base pointer to be pgd_phys.
- *
- * - pgd_phys - physical address of new TTB
- */
-SYM_FUNC_START(cpu_do_switch_mm)
- mrs x2, ttbr1_el1
- mmid x1, x1 // get mm->context.id
- phys_to_ttbr x3, x0
-
-alternative_if ARM64_HAS_CNP
- cbz x1, 1f // skip CNP for reserved ASID
- orr x3, x3, #TTBR_CNP_BIT
-1:
-alternative_else_nop_endif
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- bfi x3, x1, #48, #16 // set the ASID field in TTBR0
-#endif
- bfi x2, x1, #48, #16 // set the ASID
- msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set)
- isb
- msr ttbr0_el1, x3 // now update TTBR0
- isb
- b post_ttbr_update_workaround // Back to C code...
-SYM_FUNC_END(cpu_do_switch_mm)
-
.pushsection ".idmap.text", "awx"
.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
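
The cpu_do_switch_mm body deleted above is not dropped; the same logic moves to C (arch/arm64/mm/context.c) in this window. A minimal sketch of the equivalent C, assuming the kernel's read_sysreg()/write_sysreg(), phys_to_ttbr(), FIELD_PREP() and post_ttbr_update_workaround() helpers; ASID() stands in for the mm->context.id extraction the deleted mmid macro performed:

	#include <linux/bitfield.h>
	#include <asm/cpufeature.h>
	#include <asm/mmu.h>		/* TTBR_ASID_MASK */
	#include <asm/mmu_context.h>
	#include <asm/pgtable.h>	/* phys_to_ttbr(), TTBR_CNP_BIT */

	void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
	{
		unsigned long ttbr1 = read_sysreg(ttbr1_el1);
		unsigned long asid = ASID(mm);	/* mm->context.id, as mmid did */
		unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

		/* Skip CNP for the reserved ASID */
		if (system_supports_cnp() && asid)
			ttbr0 |= TTBR_CNP_BIT;

		/* SW PAN wants a copy of the ASID in TTBR0 as well */
		if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
			ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

		/* Set the ASID in TTBR1, since TCR.A1 is set */
		ttbr1 &= ~TTBR_ASID_MASK;
		ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);

		write_sysreg(ttbr1, ttbr1_el1);
		isb();
		write_sysreg(ttbr0, ttbr0_el1);
		isb();
		post_ttbr_update_workaround();	/* the "Back to C code" tail call */
	}
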
@@ -408,35 +384,37 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
/*
* __cpu_setup
*
- * Initialise the processor for turning the MMU on. Return in x0 the
- * value of the SCTLR_EL1 register.
+ * Initialise the processor for turning the MMU on.
+ *
+ * Input:
+ * x0 - flag: ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_SECONDARY or ARM64_CPU_RUNTIME.
+ * Output:
+ * Return in x0 the value of the SCTLR_EL1 register.
*/
.pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB
dsb nsh
- mov x0, #3 << 20
- msr cpacr_el1, x0 // Enable FP/ASIMD
- mov x0, #1 << 12 // Reset mdscr_el1 and disable
- msr mdscr_el1, x0 // access to the DCC from EL0
+ mov x1, #3 << 20
+ msr cpacr_el1, x1 // Enable FP/ASIMD
+ mov x1, #1 << 12 // Reset mdscr_el1 and disable
+ msr mdscr_el1, x1 // access to the DCC from EL0
isb // Unmask debug exceptions now,
enable_dbg // since this is per-cpu
- reset_pmuserenr_el0 x0 // Disable PMU access from EL0
+ reset_pmuserenr_el0 x1 // Disable PMU access from EL0
+ reset_amuserenr_el0 x1 // Disable AMU access from EL0
+
/*
* Memory region attributes
*/
mov_q x5, MAIR_EL1_SET
msr mair_el1, x5
/*
- * Prepare SCTLR
- */
- mov_q x0, SCTLR_EL1_SET
- /*
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
* both user and kernel.
*/
- ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+ mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
tcr_clear_errata_bits x10, x9, x5
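
The "512GB (39-bit)" comment above follows from the TxSZ encoding: TCR_EL1.T0SZ and T1SZ hold 64 - VA_BITS, so VA_BITS=39 yields 25 in each field. A standalone check of that arithmetic, with local macros mirroring the TCR_TxSZ() definitions in asm/pgtable-hwdef.h (redeclared here so the check builds on its own):

	#include <assert.h>

	/* Local mirrors of the kernel's TCR field helpers */
	#define TCR_T0SZ_OFFSET	0
	#define TCR_T1SZ_OFFSET	16
	#define TCR_T0SZ(x)	((unsigned long)(64 - (x)) << TCR_T0SZ_OFFSET)
	#define TCR_T1SZ(x)	((unsigned long)(64 - (x)) << TCR_T1SZ_OFFSET)
	#define TCR_TxSZ(x)	(TCR_T0SZ(x) | TCR_T1SZ(x))

	int main(void)
	{
		/* VA_BITS = 39: each field is 64 - 39 = 25, i.e. a
		 * 2^39-byte (512GB) range for each of TTBR0/TTBR1. */
		assert(((TCR_TxSZ(39) >> TCR_T0SZ_OFFSET) & 0x3f) == 25);
		assert(((TCR_TxSZ(39) >> TCR_T1SZ_OFFSET) & 0x3f) == 25);
		return 0;
	}
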
@@ -468,5 +446,51 @@ SYM_FUNC_START(__cpu_setup)
1:
#endif /* CONFIG_ARM64_HW_AFDBM */
msr tcr_el1, x10
+ mov x1, x0
+ /*
+ * Prepare SCTLR
+ */
+ mov_q x0, SCTLR_EL1_SET
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+ /* No ptrauth setup for runtime CPUs; cpu_do_resume installs their keys */
+ cmp x1, #ARM64_CPU_RUNTIME
+ b.eq 3f
+
+ /* Check if the CPU supports ptrauth */
+ mrs x2, id_aa64isar1_el1
+ ubfx x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8
+ cbz x2, 3f
+
+ /*
+ * The primary cpu's keys are reset here and are
+ * re-initialised with proper values later.
+ */
+ msr_s SYS_APIAKEYLO_EL1, xzr
+ msr_s SYS_APIAKEYHI_EL1, xzr
+
+ /* The primary cpu skips key install and just enables ptrauth */
+ cmp x1, #ARM64_CPU_BOOT_PRIMARY
+ b.eq 2f
+
+ /* if !system_supports_address_auth() then skip enable */
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+ b 3f
+alternative_else_nop_endif
+
+ /* Install ptrauth key for secondary cpus */
+ adr_l x2, secondary_data
+ ldr x3, [x2, #CPU_BOOT_TASK] // get secondary_data.task
+ cbz x3, 2f // check for slow booting cpus
+ ldp x3, x4, [x2, #CPU_BOOT_PTRAUTH_KEY]
+ msr_s SYS_APIAKEYLO_EL1, x3
+ msr_s SYS_APIAKEYHI_EL1, x4
+
+2: /* Enable ptrauth instructions */
+ ldr x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB
+ orr x0, x0, x2
+3:
+#endif
ret // return to head.S
SYM_FUNC_END(__cpu_setup)
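
For context, the CPU_BOOT_PTRAUTH_KEY load above consumes an APIA key that the C side records in secondary_data before waking the secondary. A sketch of that producer, mirroring the ptrauth_keys_init_kernel() helper this series adds in asm/pointer_auth.h (struct layout assumed from that header):

	#include <linux/random.h>
	#include <asm/cpufeature.h>

	struct ptrauth_key {
		unsigned long lo, hi;	/* SYS_APIAKEYLO_EL1 / SYS_APIAKEYHI_EL1 */
	};

	struct ptrauth_keys_kernel {
		struct ptrauth_key apia;
	};

	/* Seed a fresh kernel APIA key for a task; for a secondary cpu the
	 * value is copied into secondary_data, where the
	 * ldp x3, x4, [x2, #CPU_BOOT_PTRAUTH_KEY] sequence above reads it. */
	static inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
	{
		if (system_supports_address_auth())
			get_random_bytes(&keys->apia, sizeof(keys->apia));
	}
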