 arch/arm64/Kconfig                   |  4 ++++
 arch/arm64/include/asm/assembler.h   |  7 +++----
 arch/arm64/include/asm/mmu_context.h |  3 +++
 arch/arm64/include/asm/processor.h   | 13 +++++++++----
 arch/arm64/kernel/head.S             | 13 +++++++++++++
 arch/arm64/mm/fault.c                |  2 +-
 arch/arm64/mm/mmu.c                  |  1 +
 arch/arm64/mm/proc.S                 | 10 +++++++++-
 8 files changed, 43 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6d2b25f51bb3..858e353b2f40 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -716,6 +716,10 @@ config ARM64_PA_BITS_52
endchoice
+config ARM64_52BIT_VA
+ def_bool y
+ depends on ARM64_VA_BITS_48 && ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+
config ARM64_PA_BITS
int
default 48 if ARM64_PA_BITS_48
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ba609e0439e8..122d91d4097a 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -357,11 +357,10 @@ alternative_endif
.endm
/*
- * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
*/
- .macro tcr_set_idmap_t0sz, valreg, tmpreg
- ldr_l \tmpreg, idmap_t0sz
- bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+ .macro tcr_set_t0sz, valreg, t0sz
+ bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
.endm
/*
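
In TCR_EL1 the TxSZ fields encode a region size of 2^(64 - TxSZ) bytes, so T0SZ is simply 64 minus the VA width. The rewritten macro no longer hard-codes a load from idmap_t0sz; callers now pass the T0SZ value in a register, which lets proc.S feed it either idmap_t0sz or a value derived from vabits_user (see the proc.S hunk below). A standalone C model of the bfi insert, with TCR_T0SZ_OFFSET and TCR_TxSZ_WIDTH assumed from arm64's pgtable-hwdef.h (bit 0, 6 bits wide):

	#include <stdint.h>
	#include <stdio.h>

	#define TCR_T0SZ_OFFSET	0	/* assumed from pgtable-hwdef.h */
	#define TCR_TxSZ_WIDTH	6	/* assumed from pgtable-hwdef.h */

	/*
	 * C model of "bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH":
	 * insert the low TCR_TxSZ_WIDTH bits of t0sz into tcr at
	 * TCR_T0SZ_OFFSET, leaving every other TCR field untouched.
	 */
	static uint64_t tcr_set_t0sz(uint64_t tcr, uint64_t t0sz)
	{
		uint64_t mask = ((UINT64_C(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET;

		return (tcr & ~mask) | ((t0sz << TCR_T0SZ_OFFSET) & mask);
	}

	int main(void)
	{
		/* T0SZ = 12 selects a 2^(64-12) = 2^52 byte TTBR0 range. */
		printf("0x%016llx\n", (unsigned long long)tcr_set_t0sz(0, 12));
		return 0;
	}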
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index dfcfeffd2080..b0768502fa08 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -74,6 +74,9 @@ extern u64 idmap_ptrs_per_pgd;
static inline bool __cpu_uses_extended_idmap(void)
{
+ if (IS_ENABLED(CONFIG_ARM64_52BIT_VA))
+ return false;
+
return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}
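
TCR_T0SZ(x) expands to (64 - x) shifted by TCR_T0SZ_OFFSET in pgtable-hwdef.h, so this predicate asks whether boot had to program a smaller T0SZ than the VA_BITS default to reach RAM placed above 2^VA_BITS with the ID map. With CONFIG_ARM64_52BIT_VA the T0SZ value comes from vabits_user at runtime (see the proc.S hunk below), so the static comparison is short-circuited to false. A minimal sketch of the predicate, with the macro assumed as described:

	#include <stdbool.h>
	#include <stdio.h>

	#define VA_BITS		48		/* assumed default config */
	#define TCR_T0SZ(x)	(64U - (x))	/* TCR_T0SZ_OFFSET assumed to be 0 */

	static unsigned int idmap_t0sz = TCR_T0SZ(VA_BITS);

	/* Mirrors the predicate above, minus the new 52-bit short-circuit. */
	static bool cpu_uses_extended_idmap(void)
	{
		return idmap_t0sz != TCR_T0SZ(VA_BITS);
	}

	int main(void)
	{
		printf("%d\n", cpu_uses_extended_idmap());	/* 0: default T0SZ */
		idmap_t0sz = TCR_T0SZ(52);			/* as if RAM sat above 2^48 */
		printf("%d\n", cpu_uses_extended_idmap());	/* 1: extended idmap */
		return 0;
	}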
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 759927faf7f6..7ff75e52b762 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -19,10 +19,12 @@
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H
-#define TASK_SIZE_64 (UL(1) << VA_BITS)
-
-#define KERNEL_DS UL(-1)
-#define USER_DS (TASK_SIZE_64 - 1)
+#define KERNEL_DS UL(-1)
+#ifdef CONFIG_ARM64_52BIT_VA
+#define USER_DS ((UL(1) << 52) - 1)
+#else
+#define USER_DS ((UL(1) << VA_BITS) - 1)
+#endif /* CONFIG_ARM64_52BIT_VA */
/*
* On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
@@ -56,6 +58,9 @@
#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS)
+extern u64 vabits_user;
+#define TASK_SIZE_64 (UL(1) << vabits_user)
+
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 UL(0x100000000)
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
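
The upshot: USER_DS becomes a constant sized for the largest VA space the kernel may run with, while TASK_SIZE_64 now varies at runtime with vabits_user instead of being fixed at VA_BITS. Some worked values (my arithmetic, not from the patch), assuming VA_BITS = 48:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t user_ds_52   = (UINT64_C(1) << 52) - 1;	/* USER_DS with 52-bit VA */
		uint64_t user_ds_48   = (UINT64_C(1) << 48) - 1;	/* USER_DS without */
		uint64_t task_size_48 = UINT64_C(1) << 48;		/* TASK_SIZE_64, vabits_user = 48 */
		uint64_t task_size_52 = UINT64_C(1) << 52;		/* TASK_SIZE_64, vabits_user = 52 */

		printf("USER_DS (52)      = 0x%016llx\n", (unsigned long long)user_ds_52);
		printf("USER_DS (48)      = 0x%016llx\n", (unsigned long long)user_ds_48);
		printf("TASK_SIZE_64 (48) = 0x%016llx\n", (unsigned long long)task_size_48);
		printf("TASK_SIZE_64 (52) = 0x%016llx\n", (unsigned long long)task_size_52);
		return 0;
	}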
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 58fcc1edd852..c229d9cfe9bf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -318,6 +318,19 @@ __create_page_tables:
adrp x0, idmap_pg_dir
adrp x3, __idmap_text_start // __pa(__idmap_text_start)
+#ifdef CONFIG_ARM64_52BIT_VA
+ mrs_s x6, SYS_ID_AA64MMFR2_EL1
+ and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ mov x5, #52
+ cbnz x6, 1f
+#endif
+ mov x5, #VA_BITS
+1:
+ adr_l x6, vabits_user
+ str x5, [x6]
+ dmb sy
+ dc ivac, x6 // Invalidate potentially stale cache line
+
/*
* VA_BITS may be too small to allow for an ID mapping to be created
* that covers system RAM if that is located sufficiently high in the
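
The probe reads ID_AA64MMFR2_EL1 and tests the LVA field (bits [19:16], i.e. ID_AA64MMFR2_LVA_SHIFT = 16): non-zero means the CPU implements 52-bit VAs, so the cbnz skips the fallback and vabits_user is recorded as 52, otherwise it stays at VA_BITS. The dmb plus dc ivac pair matches the in-line comment: this code runs before the MMU is up, so the store must not be shadowed by a stale cacheable copy once translation is enabled. An illustrative C model of the probe (a sample register value stands in for the mrs_s, and the shift constant is an assumption):

	#include <stdint.h>
	#include <stdio.h>

	#define ID_AA64MMFR2_LVA_SHIFT	16	/* assumed field position, bits [19:16] */
	#define VA_BITS			48	/* assumed default */

	/* Model of the LVA probe: a non-zero field selects 52-bit user VAs. */
	static unsigned int probe_vabits_user(uint64_t mmfr2)
	{
		uint64_t lva = (mmfr2 >> ID_AA64MMFR2_LVA_SHIFT) & 0xf;

		return lva ? 52 : VA_BITS;
	}

	int main(void)
	{
		printf("%u\n", probe_vabits_user(UINT64_C(1) << ID_AA64MMFR2_LVA_SHIFT));	/* 52 */
		printf("%u\n", probe_vabits_user(0));						/* 48 */
		return 0;
	}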
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7d9571f4ae3d..5fe6d2e40e9b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -160,7 +160,7 @@ void show_pte(unsigned long addr)
pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
- VA_BITS, mm->pgd);
+ mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd);
pgdp = pgd_offset(mm, addr);
pgd = READ_ONCE(*pgdp);
pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e1b2d58a311a..0d3eacc4bfbb 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -52,6 +52,7 @@
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
+u64 vabits_user __ro_after_init;
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 2db1c491d45d..0cf86b17714c 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -450,7 +450,15 @@ ENTRY(__cpu_setup)
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1
- tcr_set_idmap_t0sz x10, x9
+
+#ifdef CONFIG_ARM64_52BIT_VA
+ ldr_l x9, vabits_user
+ sub x9, xzr, x9
+ add x9, x9, #64
+#else
+ ldr_l x9, idmap_t0sz
+#endif
+ tcr_set_t0sz x10, x9
/*
* Set the IPS bits in TCR_EL1.
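
The sub/add pair is just T0SZ arithmetic without a spare immediate: sub x9, xzr, x9 negates vabits_user, and add x9, x9, #64 then yields 64 - vabits_user, i.e. T0SZ = 12 for a 52-bit user space and 16 for 48 bits. A one-liner to check the same arithmetic:

	#include <stdio.h>

	/* Same arithmetic as the sub/add pair above: t0sz = 0 - vabits + 64. */
	int main(void)
	{
		for (unsigned int vabits = 48; vabits <= 52; vabits += 4)
			printf("vabits_user = %u -> T0SZ = %u\n", vabits, 64U - vabits);
		return 0;
	}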