author	Ard Biesheuvel <ardb@kernel.org>	2020-10-08 17:36:01 +0200
committer	Catalin Marinas <catalin.marinas@arm.com>	2020-11-09 17:15:37 +0000
commit	8c96400d6a39be763130a5c493647c57726f7013 (patch)
tree	0794db8f11e674731069015b34a5c593f8a77ac7 /arch
parent	arm64: mm: extend linear region for 52-bit VA configurations (diff)
arm64: mm: make vmemmap region a projection of the linear region
Now that we have reverted the introduction of the vmemmap struct page pointer and the separate physvirt_offset, we can simplify things further, and place the vmemmap region in the VA space in such a way that virtual to page translations and vice versa can be implemented using a single arithmetic shift.

One happy coincidence resulting from this is that the 48-bit/4k and 52-bit/64k configurations (which are assumed to be the two most prevalent) end up with the same placement of the vmemmap region. In a subsequent patch, we will take advantage of this, and unify the memory maps even more.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Link: https://lore.kernel.org/r/20201008153602.9467-4-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
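To make the single arithmetic shift concrete, here is a minimal stand-alone sketch of the translation scheme described above. It is an illustration only: it assumes 4k pages (PAGE_SHIFT == 12) and a 64-byte struct page (STRUCT_PAGE_MAX_SHIFT == 6), and the model_* helpers are hypothetical names that mirror, but are not, the kernel's page_to_virt()/virt_to_page() macros.

/*
 * Illustration only: a user-space model of the shift-based translation.
 * Assumes PAGE_SHIFT == 12 and sizeof(struct page) == 64, giving
 * VMEMMAP_SHIFT == 6. Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define STRUCT_PAGE_MAX_SHIFT	6	/* log2 of a 64-byte struct page */
#define VMEMMAP_SHIFT		(PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)
#define PAGE_MASK		(~((UINT64_C(1) << PAGE_SHIFT) - 1))

/* page_to_virt: one left shift scales the 64-byte struct page stride
 * up to the 4k page stride. */
static uint64_t model_page_to_virt(uint64_t page)
{
	return page << VMEMMAP_SHIFT;
}

/* virt_to_page: an arithmetic (sign-extending) right shift maps a
 * page-aligned linear-map address down into the vmemmap region. */
static uint64_t model_virt_to_page(uint64_t vaddr)
{
	return (uint64_t)((int64_t)(vaddr & PAGE_MASK) >> VMEMMAP_SHIFT);
}

int main(void)
{
	uint64_t va = UINT64_C(0xffff000012345678);	/* hypothetical 48-bit VA */
	uint64_t pg = model_virt_to_page(va);

	/* Round-trips to the page-aligned address 0xffff000012345000. */
	printf("page %#jx -> virt %#jx\n", (uintmax_t)pg,
	       (uintmax_t)model_page_to_virt(pg));
	return 0;
}

With these constants, model_virt_to_page() followed by model_page_to_virt() recovers the page-aligned address using nothing but a mask and one shift in each direction, which is the whole point of making the vmemmap region a projection of the linear region.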
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/include/asm/memory.h	| 14 ++++++--------
-rw-r--r--	arch/arm64/mm/init.c	|  2 ++
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 8e89f9b9091e..ecd6342e27ca 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -30,8 +30,8 @@
* keep a constant PAGE_OFFSET and "fallback" to using the higher end
* of the VMEMMAP where 52-bit support is not available in hardware.
*/
-#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
- >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
+#define VMEMMAP_SHIFT (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) >> VMEMMAP_SHIFT)
/*
* PAGE_OFFSET - the virtual address of the start of the linear map, at the
@@ -50,7 +50,7 @@
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (BPF_JIT_REGION_END)
#define MODULES_VSIZE (SZ_128M)
-#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M)
+#define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
@@ -308,15 +308,13 @@ static inline void *phys_to_virt(phys_addr_t x)
#else
#define page_to_virt(x) ({ \
__typeof__(x) __page = x; \
- u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
- u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE); \
+ u64 __addr = (u64)__page << VMEMMAP_SHIFT; \
(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})
#define virt_to_page(x) ({ \
- u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE; \
- u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \
- (struct page *)__addr; \
+ u64 __addr = __tag_reset((u64)(x)) & PAGE_MASK; \
+ (struct page *)((s64)__addr >> VMEMMAP_SHIFT); \
})
#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
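As a rough sanity check of the "happy coincidence" noted in the commit message, assuming STRUCT_PAGE_MAX_SHIFT == 6 (a 64-byte struct page) in both configurations:

/*
 * 48-bit/4k:  VMEMMAP_SHIFT = 12 - 6 = 6
 *             VMEMMAP_START = -(1 << (48 - 6)) = -(1 << 42)
 * 52-bit/64k: VMEMMAP_SHIFT = 16 - 6 = 10
 *             VMEMMAP_START = -(1 << (52 - 10)) = -(1 << 42)
 *
 * Both configurations place the start of the vmemmap region at the
 * same virtual address.
 */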
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 7e15d92836d8..3a5e9f9298e9 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -502,6 +502,8 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
+ BUILD_BUG_ON(!is_power_of_2(sizeof(struct page)));
+
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
swiotlb_init(1);
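The BUILD_BUG_ON() added above guards the assumption the new memory.h macros depend on: replacing the divide by sizeof(struct page) and the multiply by PAGE_SIZE with a single shift is only exact when sizeof(struct page) is a power of two, so the build now fails loudly if that ever stops being true.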