 arch/arm64/include/asm/efi.h       | 4 ++--
 arch/arm64/include/asm/memory.h    | 9 ++++++++-
 arch/arm64/include/asm/processor.h | 2 +-
 arch/arm64/kernel/head.S           | 2 +-
 arch/arm64/kernel/kaslr.c          | 6 +++---
 arch/arm64/mm/kasan_init.c         | 3 ++-
 6 files changed, 17 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 76a144702586..b54d3a86c444 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -79,7 +79,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
/*
* On arm64, we have to ensure that the initrd ends up in the linear region,
- * which is a 1 GB aligned region of size '1UL << (VA_BITS - 1)' that is
+ * which is a 1 GB aligned region of size '1UL << (VA_BITS_MIN - 1)' that is
* guaranteed to cover the kernel Image.
*
* Since the EFI stub is part of the kernel Image, we can relax the
@@ -90,7 +90,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
unsigned long image_addr)
{
- return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS - 1));
+ return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
}
#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
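As a quick sanity check of the bound this helper computes, here is a minimal stand-alone sketch; the function name max_initrd_addr and the VA_BITS_MIN value of 48 are assumptions for illustration, not part of the patch:

/* sketch: the efi_get_max_initrd_addr() arithmetic, VA_BITS_MIN assumed 48 */
#include <stdio.h>

#define SZ_1G           (1UL << 30)
#define VA_BITS_MIN     48

static unsigned long max_initrd_addr(unsigned long image_addr)
{
        /* align the Image base down to 1 GB, then add half of the
         * VA_BITS_MIN-sized region guaranteed to be in the linear map */
        return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
}

int main(void)
{
        /* e.g. an Image at 0x40080000: 0x40000000 + 2^47 */
        printf("0x%lx\n", max_initrd_addr(0x40080000UL)); /* 0x800040000000 */
        return 0;
}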
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 968659c90f5c..79e75e45d560 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -52,6 +52,13 @@
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
+#if VA_BITS > 48
+#define VA_BITS_MIN (48)
+#else
+#define VA_BITS_MIN (VA_BITS)
+#endif
+#define _VA_START(va) (UL(0xffffffffffffffff) - \
+ (UL(1) << ((va) - 1)) + 1)
#define KERNEL_START _text
#define KERNEL_END _end
@@ -74,7 +81,7 @@
#define KASAN_THREAD_SHIFT 1
#else
#define KASAN_THREAD_SHIFT 0
-#define KASAN_SHADOW_END (VA_START)
+#define KASAN_SHADOW_END (_VA_START(VA_BITS_MIN))
#endif
#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
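The new macros are easiest to check with concrete numbers. A minimal sketch, assuming the usual 48-bit and 52-bit configurations; note how VA_BITS_MIN keeps KASAN_SHADOW_END at the 48-bit value even when VA_BITS is 52:

/* _VA_START(va) is the base of the top 2^(va-1) bytes of the
 * 64-bit address space */
#include <stdio.h>

#define _VA_START(va)   (0xffffffffffffffffUL - \
                         (1UL << ((va) - 1)) + 1)

int main(void)
{
        printf("0x%lx\n", _VA_START(48));       /* 0xffff800000000000 */
        printf("0x%lx\n", _VA_START(52));       /* 0xfff8000000000000 */
        return 0;
}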
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 844e2964b0f5..0e1f2770192a 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -42,7 +42,7 @@
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
-#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS)
+#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64 (UL(1) << vabits_user)
#ifdef CONFIG_COMPAT
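The effect is that a 52-bit-VA kernel still hands out a 48-bit address space by default, while TASK_SIZE_64 tracks the runtime vabits_user. A sketch of the resulting values (the 52-bit vabits_user is an assumed example):

/* sketch: default mmap window vs. maximum task size on a 52-bit build */
#include <stdio.h>

#define VA_BITS_MIN     48

static unsigned long vabits_user = 52; /* runtime value, set in head.S */

int main(void)
{
        printf("default window: 0x%lx\n", 1UL << VA_BITS_MIN);  /* 2^48 */
        printf("task size:      0x%lx\n", 1UL << vabits_user);  /* 2^52 */
        return 0;
}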
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 2cdacd1c141b..ac58c69993ec 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -314,7 +314,7 @@ __create_page_tables:
mov x5, #52
cbnz x6, 1f
#endif
- mov x5, #VA_BITS
+ mov x5, #VA_BITS_MIN
1:
adr_l x6, vabits_user
str x5, [x6]
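For readers who don't speak A64, a hedged C paraphrase of the fragment above (at this point x6 holds a "CPU supports 52-bit VA" flag; the helper name is hypothetical):

/* sketch: what the head.S sequence stores into vabits_user */
#define VA_BITS_MIN     48

static unsigned long vabits_user;

static void set_vabits_user(int cpu_has_52bit_va)
{
        unsigned long va = 52;          /* mov x5, #52          */
        if (!cpu_has_52bit_va)          /* cbnz x6, 1f          */
                va = VA_BITS_MIN;       /* mov x5, #VA_BITS_MIN */
        vabits_user = va;               /* str x5, [x6]         */
}

int main(void)
{
        set_vabits_user(0);     /* no 52-bit support: falls back */
        return vabits_user == VA_BITS_MIN ? 0 : 1;
}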
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 708051655ad9..5a59f7567f9c 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -116,15 +116,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
/*
* OK, so we are proceeding with KASLR enabled. Calculate a suitable
* kernel image offset from the seed. Let's place the kernel in the
- * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
+ * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
* the lower and upper quarters to avoid colliding with other
* allocations.
* Even if we could randomize at page granularity for 16k and 64k pages,
* let's always round to 2 MB so we don't interfere with the ability to
* map using contiguous PTEs
*/
- mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
- offset = BIT(VA_BITS - 3) + (seed & mask);
+ mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
+ offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
/* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48;
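Worked numbers for the randomization window (a sketch; the VA_BITS_MIN value of 48 and the seed are assumed):

/* sketch: the offset lands in [2^45, 3*2^45), i.e. the middle half of a
 * 2^47-byte region, always 2 MB aligned */
#include <stdio.h>

#define SZ_2M           (2UL << 20)
#define VA_BITS_MIN     48
#define BIT(n)          (1UL << (n))

int main(void)
{
        unsigned long seed = 0x123456789abcdef0UL;      /* example only */
        unsigned long mask, offset;

        mask   = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
        offset = BIT(VA_BITS_MIN - 3) + (seed & mask);

        printf("mask   = 0x%lx\n", mask);       /* 0x3fffffe00000 */
        printf("offset = 0x%lx\n", offset);
        return 0;
}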
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 05edfe9b02e4..725222271474 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -156,7 +156,8 @@ asmlinkage void __init kasan_early_init(void)
{
BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
true);
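The two new BUILD_BUG_ONs pin down PGD alignment for both candidate shadow starts. A user-space sketch of the asserted property, assuming _KASAN_SHADOW_START(va) expands to KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)), generic KASAN (scale shift 3), and 4K pages with four levels (PGDIR_SIZE = 2^39); the KASAN_SHADOW_END value is an example, not the kernel's:

/* sketch: both candidate shadow starts stay PGD-aligned */
#include <stdio.h>

#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)
#define PGDIR_SIZE              (1UL << 39)             /* assumed config */
#define KASAN_SHADOW_SCALE_SHIFT 3                      /* generic KASAN */
#define KASAN_SHADOW_END        0xffff800000000000UL    /* example value */
#define _KASAN_SHADOW_START(va) \
        (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))

int main(void)
{
        printf("%d\n", IS_ALIGNED(_KASAN_SHADOW_START(48), PGDIR_SIZE)); /* 1 */
        printf("%d\n", IS_ALIGNED(_KASAN_SHADOW_START(52), PGDIR_SIZE)); /* 1 */
        return 0;
}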