author     Jungseok Lee <jays.lee@samsung.com>    2014-05-12 18:40:51 +0900
committer  Catalin Marinas <catalin.marinas@arm.com>    2014-07-23 15:27:40 +0100
commit     c79b954bf6c006f2d3dd9d01f231abeead13a410
tree       b7d4375c12d2b61554af0e5eb7596b22c60c643e /arch/arm64/kernel
parent     arm64: Add 4 levels of page tables definition with 4KB pages
arm64: mm: Implement 4 levels of translation tables
This patch implements 4 levels of translation tables, since 3 levels of page tables with 4KB pages cannot support the 40-bit physical address space described in [1], due to the following issue.

The kernel logical memory map with 4KB pages and 3 levels (0xffffffc000000000-0xffffffffffffffff) cannot cover the RAM region from 544GB to 1024GB in [1]. Specifically, the ARM64 kernel fails to create a mapping for this region in map_mem(), since __phys_to_virt for this region overflows the address space. If an SoC design follows [1], more than 32GB of RAM would be placed starting at 544GB; even a 64GB system is supposed to use the region from 544GB to 576GB for only 32GB of RAM. The natural solution is to enable 4 levels of page tables rather than hacking __virt_to_phys and __phys_to_virt. However, it is recommended that 4 levels of page tables be enabled only if the memory map is too sparse or there is about 512GB of RAM.

References
----------
[1]: Principles of ARM Memory Maps, White Paper, Issue C

Signed-off-by: Jungseok Lee <jays.lee@samsung.com>
Reviewed-by: Sungjinn Chung <sungjinn.chung@samsung.com>
Acked-by: Kukjin Kim <kgene.kim@samsung.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Steve Capper <steve.capper@linaro.org>
[catalin.marinas@arm.com: MEMBLOCK_INITIAL_LIMIT removed, same as PUD_SIZE]
[catalin.marinas@arm.com: early_ioremap_init() updated for 4 levels]
[catalin.marinas@arm.com: 48-bit VA depends on BROKEN until KVM is fixed]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Jungseok Lee <jungseoklee85@gmail.com>
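To make the overflow concrete, here is a small user-space C sketch (not part of the patch) that recomputes the arithmetic from the commit message; the 39-bit PAGE_OFFSET value is the one quoted above, while a PHYS_OFFSET of 544GB is an assumption taken from the memory map in [1], not from this diff.

/* Illustrative only: why a 4KB/3-level linear map cannot reach RAM at 544GB-1024GB. */
#include <stdio.h>
#include <stdint.h>

#define GB              (1ULL << 30)
#define PAGE_OFFSET_39  0xffffffc000000000ULL   /* 4KB pages, 3 levels of tables */
#define PHYS_OFFSET     (544 * GB)              /* assumed start of RAM per [1] */

static uint64_t phys_to_virt(uint64_t phys)
{
	return phys - PHYS_OFFSET + PAGE_OFFSET_39;     /* mirrors __phys_to_virt */
}

int main(void)
{
	uint64_t linear_map_size = 0ULL - PAGE_OFFSET_39;  /* 256GB of linear map */
	uint64_t last_byte = 1024 * GB - 1;                /* top of the RAM window in [1] */

	printf("linear map covers %llu GB\n",
	       (unsigned long long)(linear_map_size / GB));
	/* 480GB of RAM offset cannot fit in 256GB: the addition wraps past 2^64 */
	printf("__phys_to_virt(0x%llx) = 0x%llx (not a valid kernel address)\n",
	       (unsigned long long)last_byte,
	       (unsigned long long)phys_to_virt(last_byte));
	return 0;
}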
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/head.S   | 42
-rw-r--r--  arch/arm64/kernel/traps.c  |  5
2 files changed, 39 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index fa3b7fb8a77a..847b99daad79 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -476,16 +476,42 @@ ENDPROC(__calc_phys_offset)
.quad PAGE_OFFSET
/*
- * Macro to populate the PGD for the corresponding block entry in the next
- * level (tbl) for the given virtual address.
+ * Macro to populate the PUD for the corresponding block entry in the next
+ * level (tbl) for the given virtual address in case of 4 levels.
*
- * Preserves: pgd, tbl, virt
- * Corrupts: tmp1, tmp2
+ * Preserves: pgd, virt
+ * Corrupts: tbl, tmp1, tmp2
+ * Returns: pud
*/
- .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
+ .macro create_pud_entry, pgd, tbl, virt, pud, tmp1, tmp2
+#ifdef CONFIG_ARM64_4_LEVELS
+ add \tbl, \tbl, #PAGE_SIZE // bump tbl 1 page up.
+ // to make room for pud
+ add \pud, \pgd, #PAGE_SIZE // pgd points to pud which
+ // follows pgd
+ lsr \tmp1, \virt, #PUD_SHIFT
+ and \tmp1, \tmp1, #PTRS_PER_PUD - 1 // PUD index
+ orr \tmp2, \tbl, #3 // PUD entry table type
+ str \tmp2, [\pud, \tmp1, lsl #3]
+#else
+ mov \pud, \tbl
+#endif
+ .endm
+
+/*
+ * Macro to populate the PGD (and possibly PUD) for the corresponding
+ * block entry in the next level (tbl) for the given virtual address.
+ *
+ * Preserves: pgd, virt
+ * Corrupts: tmp1, tmp2, tmp3
+ * Returns: tbl -> page where block mappings can be placed
+ * (changed to make room for pud with 4 levels, preserved otherwise)
+ */
+ .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2, tmp3
+ create_pud_entry \pgd, \tbl, \virt, \tmp3, \tmp1, \tmp2
lsr \tmp1, \virt, #PGDIR_SHIFT
and \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index
- orr \tmp2, \tbl, #3 // PGD entry table type
+ orr \tmp2, \tmp3, #3 // PGD entry table type
str \tmp2, [\pgd, \tmp1, lsl #3]
.endm
@@ -550,7 +576,7 @@ __create_page_tables:
add x0, x25, #PAGE_SIZE // section table address
ldr x3, =KERNEL_START
add x3, x3, x28 // __pa(KERNEL_START)
- create_pgd_entry x25, x0, x3, x5, x6
+ create_pgd_entry x25, x0, x3, x1, x5, x6
ldr x6, =KERNEL_END
mov x5, x3 // __pa(KERNEL_START)
add x6, x6, x28 // __pa(KERNEL_END)
@@ -561,7 +587,7 @@ __create_page_tables:
*/
add x0, x26, #PAGE_SIZE // section table address
mov x5, #PAGE_OFFSET
- create_pgd_entry x26, x0, x5, x3, x6
+ create_pgd_entry x26, x0, x5, x1, x3, x6
ldr x6, =KERNEL_END
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
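For readability, here is a C-style paraphrase of what the new create_pud_entry macro above does when CONFIG_ARM64_4_LEVELS is enabled. It is a sketch only: the function shape, the pointer types and the PUD_SHIFT/PTRS_PER_PUD/TABLE_TYPE constants are illustrative values for the 4KB-page, 4-level layout, not taken from this hunk.

#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PUD_SHIFT	30		/* each PUD entry maps 1GB with 4KB pages (assumption) */
#define PTRS_PER_PUD	512
#define TABLE_TYPE	3UL		/* table descriptor bits, as in "orr ..., #3" */

/*
 * Sketch of create_pud_entry: the PUD page sits one page after the PGD
 * page, and *tbl is bumped one page up so the block mappings written
 * later do not overwrite the PUD.  The returned PUD page is what
 * create_pgd_entry then links from the PGD slot.
 */
static uint64_t *create_pud_entry(uint64_t *pgd, uint64_t **tbl, uint64_t virt)
{
	uint64_t *next = (uint64_t *)((uintptr_t)*tbl + PAGE_SIZE);	/* bump tbl one page up */
	uint64_t *pud  = (uint64_t *)((uintptr_t)pgd + PAGE_SIZE);	/* pud follows pgd */
	uint64_t idx   = (virt >> PUD_SHIFT) & (PTRS_PER_PUD - 1);	/* PUD index */

	pud[idx] = (uintptr_t)next | TABLE_TYPE;	/* table-type PUD entry -> next level */
	*tbl = next;
	return pud;
}

With 3-level configurations the #else branch simply aliases pud to tbl, so create_pgd_entry links the PGD slot straight to the block-mapping table, exactly as the old create_pgd_entry did.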
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 506f7814e305..02cd3f023e9a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -339,6 +339,11 @@ void __pmd_error(const char *file, int line, unsigned long val)
pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
}
+void __pud_error(const char *file, int line, unsigned long val)
+{
+ pr_crit("%s:%d: bad pud %016lx.\n", file, line, val);
+}
+
void __pgd_error(const char *file, int line, unsigned long val)
{
pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
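The new __pud_error helper follows the existing __pmd_error/__pgd_error pattern; it is presumably reached through a pud_ERROR() macro added in the companion pgtable header changes (not part of this diff), along these lines:

/* Hypothetical wiring, mirroring pmd_ERROR()/pgd_ERROR(); the actual header hunk is not shown here. */
#define pud_ERROR(pud)	__pud_error(__FILE__, __LINE__, pud_val(pud))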