author		Peter Collingbourne <pcc@google.com>	2021-03-18 20:10:53 -0700
committer	Catalin Marinas <catalin.marinas@arm.com>	2021-04-13 17:31:44 +0100
commit		201698626fbca1cf1a3b686ba14cf2a056500716 (patch)
tree		84e0f6591d4d0f3ea2052cc36ad797194f071b44 /arch/arm64/kernel/entry.S
parent		arm64: mte: make the per-task SCTLR_EL1 field usable elsewhere (diff)
arm64: Introduce prctl(PR_PAC_{SET,GET}_ENABLED_KEYS)
This change introduces a prctl that allows the user program to control which PAC keys are enabled in a particular task. The main reason why this is useful is to enable a userspace ABI that uses PAC to sign and authenticate function pointers and other pointers exposed outside of the function, while still allowing binaries conforming to the ABI to interoperate with legacy binaries that do not sign or authenticate pointers. The idea is that a dynamic loader or early startup code would issue this prctl very early after establishing that a process may load legacy binaries, but before executing any PAC instructions.

This change adds a small amount of overhead to kernel entry and exit due to the additional required instruction sequences.

On a DragonBoard 845c (Cortex-A75) with the powersave governor, the overhead of similar instruction sequences was measured as 4.9ns when simulating the common case where IA is left enabled, or 43.7ns when simulating the uncommon case where IA is disabled. These numbers can be seen as a worst case, since a more realistic deployment would use a better-performing governor and a newer chip that, unlike the Cortex-A75, actually supports PAC and would be expected to be faster.

On an Apple M1 under a hypervisor, the overhead of the entry/exit instruction sequences introduced by this patch was measured as 0.3ns in the case where IA is left enabled, and 33.0ns in the case where IA is disabled.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Reviewed-by: Dave Martin <Dave.Martin@arm.com>
Link: https://linux-review.googlesource.com/id/Ibc41a5e6a76b275efbaa126b31119dc197b927a5
Link: https://lore.kernel.org/r/d6609065f8f40397a4124654eb68c9f490b4d477.1616123271.git.pcc@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
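As context for how userspace would drive this (not part of the patch itself), a dynamic loader might issue the new prctl roughly as follows. This is a minimal sketch: it assumes the PR_PAC_SET_ENABLED_KEYS/PR_PAC_GET_ENABLED_KEYS constants and the PR_PAC_APIAKEY key mask from <linux/prctl.h>; the fallback values below are an assumption in case the installed headers predate this series.

/*
 * Minimal sketch (not from the patch): a dynamic loader disabling the IA
 * key before any PAC instructions run, so legacy binaries that neither
 * sign nor authenticate pointers keep working.  The fallback constant
 * values below are assumptions for older <linux/prctl.h>.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

#ifndef PR_PAC_SET_ENABLED_KEYS
#define PR_PAC_SET_ENABLED_KEYS	60
#define PR_PAC_GET_ENABLED_KEYS	61
#endif
#ifndef PR_PAC_APIAKEY
#define PR_PAC_APIAKEY	(1UL << 0)
#endif

static int disable_ia_for_legacy_binaries(void)
{
	/* arg2 = mask of keys affected, arg3 = enabled state for those keys. */
	if (prctl(PR_PAC_SET_ENABLED_KEYS, PR_PAC_APIAKEY, 0, 0, 0))
		return -1;

	/* Read back the full enabled-keys mask, e.g. for diagnostics. */
	long keys = prctl(PR_PAC_GET_ENABLED_KEYS, 0, 0, 0, 0);
	if (keys < 0)
		return -1;
	printf("enabled PAC keys mask: 0x%lx\n", keys);
	return 0;
}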
Diffstat (limited to 'arch/arm64/kernel/entry.S')
-rw-r--r--	arch/arm64/kernel/entry.S	39
1 file changed, 37 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a31a0a713c85..36ae1cdfaeb6 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -247,7 +247,24 @@ alternative_else_nop_endif
check_mte_async_tcf x19, x22
apply_ssbd 1, x22, x23
- ptrauth_keys_install_kernel tsk, x20, x22, x23
+ ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ /*
+ * Enable IA for in-kernel PAC if the task had it disabled. Although
+ * this could be implemented with an unconditional MRS which would avoid
+ * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
+ */
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ tbnz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+ mrs x0, sctlr_el1
+ orr x0, x0, SCTLR_ELx_ENIA
+ msr sctlr_el1, x0
+1:
+ isb
+alternative_else_nop_endif
+#endif
mte_set_kernel_gcr x22, x23
@@ -351,9 +368,27 @@ alternative_else_nop_endif
3:
scs_save tsk, x0
- /* No kernel C function calls after this as user keys are set. */
+ /*
+ * No kernel C function calls after this as user keys are set and IA may
+ * be disabled.
+ */
ptrauth_keys_install_user tsk, x0, x1, x2
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ /*
+ * IA was enabled for in-kernel PAC. Disable it now if needed.
+ * All other per-task SCTLR bits were updated on task switch.
+ */
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ tbnz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+ mrs x0, sctlr_el1
+ bic x0, x0, SCTLR_ELx_ENIA
+ msr sctlr_el1, x0
+1:
+alternative_else_nop_endif
+#endif
+
mte_set_user_gcr tsk, x0, x1
apply_ssbd 0, x0, x1
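For readers less familiar with the entry.S macros, the two hunks above are roughly equivalent to the following illustrative, self-contained C restatement (not code from the patch): read_sctlr_el1()/write_sctlr_el1(), isb() and the SCTLR_ELX_ENIA value are stand-ins for the MRS/MSR/ISB instructions and the kernel's SCTLR_ELx_ENIA definition.

#include <stdint.h>

#define SCTLR_ELX_ENIA	(1UL << 31)	/* assumed position of SCTLR_EL1.EnIA */

static uint64_t sctlr_el1;		/* stands in for the system register */

static uint64_t read_sctlr_el1(void)   { return sctlr_el1; }	/* MRS */
static void write_sctlr_el1(uint64_t v) { sctlr_el1 = v; }	/* MSR */
static void isb(void) { }					/* ISB */

static void kernel_entry_enable_ia(uint64_t thread_sctlr_user)
{
	/*
	 * First hunk: enable IA for in-kernel PAC if the task runs with it
	 * disabled (the TBNZ on SCTLR_ELx_ENIA_SHIFT skips the update).
	 */
	if (!(thread_sctlr_user & SCTLR_ELX_ENIA))
		write_sctlr_el1(read_sctlr_el1() | SCTLR_ELX_ENIA);
	isb();	/* synchronize before any in-kernel PAC use */
}

static void kernel_exit_restore_ia(uint64_t thread_sctlr_user)
{
	/*
	 * Second hunk: drop IA again before returning to a task that keeps
	 * it disabled.  No explicit ISB here, matching the hunk, as the
	 * exception return itself synchronizes the change.
	 */
	if (!(thread_sctlr_user & SCTLR_ELX_ENIA))
		write_sctlr_el1(read_sctlr_el1() & ~SCTLR_ELX_ENIA);
}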