author    Steve Capper <steve.capper@arm.com>  2019-08-07 16:55:19 +0100
committer Will Deacon <will@kernel.org>        2019-08-09 11:17:24 +0100
commit    c812026c54cfaec23fa1d78cdbfd0e56e787470a (patch)
tree      21c649aa4bd867a2d871e7c2ad9192824999874f /arch/arm64/kernel
parent    arm64: mm: Introduce vabits_actual (diff)
arm64: mm: Logic to make offset_ttbr1 conditional
When running with a 52-bit userspace VA and a 48-bit kernel VA we offset ttbr1_el1 to allow the kernel pagetables with a 52-bit PTRS_PER_PGD to be used for both userspace and kernel.

Moving on to a 52-bit kernel VA we no longer require this offset to ttbr1_el1 should we be running on a system with HW support for 52-bit VAs.

This patch introduces conditional logic to offset_ttbr1 to query SYS_ID_AA64MMFR2_EL1 whenever 52-bit VAs are selected. If there is HW support for 52-bit VAs then the ttbr1 offset is skipped.

We choose to read a system register rather than vabits_actual because offset_ttbr1 can be called in places where the kernel data is not actually mapped.

Calls to offset_ttbr1 appear to be made from rarely called code paths so this extra logic is not expected to adversely affect performance.

Signed-off-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
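For reference, the offset_ttbr1 macro itself is defined in arch/arm64/include/asm/assembler.h and is not part of the hunks below; the callers simply gain a scratch register argument. A rough sketch of the conditional logic described above could look like the following (the feature-field shift, the offset constant and the local label name are illustrative assumptions, not taken from this patch):

	/*
	 * Illustrative sketch only -- see arch/arm64/include/asm/assembler.h
	 * for the real definition. \tmp is the scratch register the callers
	 * in this diff now pass in.
	 */
	.macro	offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
	/* Query SYS_ID_AA64MMFR2_EL1 for HW support for 52-bit VAs */
	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	\tmp, .Lskipoffs_\@		// HW 52-bit VA: skip the offset
	/* 48-bit-only HW: offset ttbr1 so the 52-bit PGD can still be used */
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@ :
#endif
	.endm

If HW 52-bit VA support is present the branch is taken and ttbr1 is loaded unmodified; otherwise the pre-existing offset is applied exactly as before this patch.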
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/head.S          | 2 +-
-rw-r--r--  arch/arm64/kernel/hibernate-asm.S | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 6dc7349868d9..a96dc4386c7c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -777,7 +777,7 @@ ENTRY(__enable_mmu)
phys_to_ttbr x1, x1
phys_to_ttbr x2, x2
msr ttbr0_el1, x2 // load TTBR0
- offset_ttbr1 x1
+ offset_ttbr1 x1, x3
msr ttbr1_el1, x1 // load TTBR1
isb
msr sctlr_el1, x0
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index 2f4a2ce7264b..38bcd4d4e43b 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -22,14 +22,14 @@
* Even switching to our copied tables will cause a changed output address at
* each stage of the walk.
*/
-.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
phys_to_ttbr \tmp, \zero_page
msr ttbr1_el1, \tmp
isb
tlbi vmalle1
dsb nsh
phys_to_ttbr \tmp, \page_table
- offset_ttbr1 \tmp
+ offset_ttbr1 \tmp, \tmp2
msr ttbr1_el1, \tmp
isb
.endm
@@ -70,7 +70,7 @@ ENTRY(swsusp_arch_suspend_exit)
* We execute from ttbr0, change ttbr1 to our copied linear map tables
* with a break-before-make via the zero page
*/
- break_before_make_ttbr_switch x5, x0, x6
+ break_before_make_ttbr_switch x5, x0, x6, x8
mov x21, x1
mov x30, x2
@@ -101,7 +101,7 @@ ENTRY(swsusp_arch_suspend_exit)
dsb ish /* wait for PoU cleaning to finish */
/* switch to the restored kernels page tables */
- break_before_make_ttbr_switch x25, x21, x6
+ break_before_make_ttbr_switch x25, x21, x6, x8
ic ialluis
dsb ish