author		Steve Capper <steve.capper@arm.com>	2019-08-07 16:55:20 +0100
committer	Will Deacon <will@kernel.org>		2019-08-09 11:17:25 +0100
commit		c8b6d2ccf9b10ce872cdea037f9685804440bb7e (patch)
tree		5b38adefb8f977e7f37e37da5efc98d34383867c /arch
parent		arm64: mm: Logic to make offset_ttbr1 conditional (diff)
arm64: mm: Separate out vmemmap
vmemmap is a preprocessor definition that depends on a variable,
memstart_addr. In a later patch we will need to expand the size of the
VMEMMAP region and optionally modify vmemmap depending upon whether or
not hardware support is available for 52-bit virtual addresses.

This patch changes vmemmap to be a variable. As the old definition
depended on a variable load, this should not affect performance
noticeably.

Signed-off-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
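For readers unfamiliar with the expression being moved: the bias subtracted
from VMEMMAP_START lets pfn_to_page() index the virtually contiguous
struct page array with a raw pfn even though physical memory starts at
memstart_addr rather than at zero. A minimal standalone sketch of the
arithmetic, using made-up values for PAGE_SHIFT, VMEMMAP_START and
memstart_addr (none of these are the kernel's real constants):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative values only -- not the kernel's real constants. */
	#define PAGE_SHIFT	12
	#define VMEMMAP_START	0xfffffdffffe00000ULL	/* made-up base */

	struct page { char pad[64]; };	/* stand-in for the kernel's struct page */

	int main(void)
	{
		uint64_t memstart_addr = 0x80000000ULL;	/* example: RAM starts at 2 GiB */

		/* The patch computes the same bias at boot time: point vmemmap
		 * below VMEMMAP_START by the pfn of the first byte of RAM. */
		struct page *vmemmap =
			(struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT);

		/* pfn_to_page() then reduces to plain array indexing with a raw pfn. */
		uint64_t pfn = memstart_addr >> PAGE_SHIFT;
		struct page *first = vmemmap + pfn;

		/* first == (struct page *)VMEMMAP_START: the array begins exactly
		 * at the entry for the first real page of RAM. */
		printf("first struct page at %p\n", (void *)first);
		return 0;
	}

With the bias in place, vmemmap + pfn lands at VMEMMAP_START for the first
page of RAM, so no entries of the array are spent on nonexistent low pfns.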
Diffstat (limited to 'arch')
 arch/arm64/include/asm/pgtable.h | 4 ++--
 arch/arm64/mm/init.c             | 5 +++++
 2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 046b811309bb..4a695b9ee0f0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -23,8 +23,6 @@
 #define VMALLOC_START		(MODULES_END)
 #define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
-#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
-
 #define FIRST_USER_ADDRESS	0UL
 
 #ifndef __ASSEMBLY__
@@ -35,6 +33,8 @@
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 
+extern struct page *vmemmap;
+
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
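The header change is source-compatible with existing users because, under
CONFIG_SPARSEMEM_VMEMMAP, the generic memory model resolves pfn/page
conversions through the vmemmap symbol, which works identically whether
vmemmap is a macro or a variable. The relevant definitions in
include/asm-generic/memory_model.h look like this (quoted from memory;
check the tree at this commit for the exact text):

	/* memmap is virtually contiguous.  */
	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)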
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index e752f46d430e..2940221e5519 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -53,6 +53,9 @@ EXPORT_SYMBOL(memstart_addr);
 s64 physvirt_offset __ro_after_init;
 EXPORT_SYMBOL(physvirt_offset);
 
+struct page *vmemmap __ro_after_init;
+EXPORT_SYMBOL(vmemmap);
+
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
 
 #ifdef CONFIG_KEXEC_CORE
@@ -320,6 +323,8 @@ void __init arm64_memblock_init(void)
 
 	physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
 
+	vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
+
 	/*
 	 * Remove the memory that we will not be able to cover with the
 	 * linear mapping. Take care not to clip the kernel which may be
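Ordering note: the assignment runs once in arm64_memblock_init(),
immediately after memstart_addr and physvirt_offset are fixed, and well
before the memory map is populated, so vmemmap holds its final value by the
time anything dereferences it. A simplified sketch of the relevant boot
order (function names from arch/arm64/kernel/setup.c; intermediate calls
omitted, so treat this as an approximation rather than the real call chain):

	/* Simplified ordering sketch, not actual kernel code. */
	void setup_arch_sketch(void)
	{
		arm64_memblock_init();	/* fixes memstart_addr; now also sets vmemmap */
		paging_init();		/* builds the kernel page tables */
		bootmem_init();		/* sparse_init() populates the VMEMMAP region */
	}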