author		Mike Rapoport <rppt@linux.ibm.com>	2020-12-14 19:09:47 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-15 12:13:42 -0800
commit		ea34f78f3df62e531cf2beca997ff6bfae2b1e0d (patch)
tree		077a5c262a15c180ccb3fd7d4305cdc7c0e70dfc /arch/ia64/mm
parent		ia64: split virtual map initialization out of paging_init() (diff)
download	linux-dev-ea34f78f3df62e531cf2beca997ff6bfae2b1e0d.tar.xz
		linux-dev-ea34f78f3df62e531cf2beca997ff6bfae2b1e0d.zip
ia64: forbid using VIRTUAL_MEM_MAP with FLATMEM
Virtual memory map was intended to avoid wasting memory on the memory map
on systems with large holes in the physical memory layout. Long ago it was
superseded, first by DISCONTIGMEM and then by SPARSEMEM. Moreover,
SPARSEMEM_VMEMMAP provides the same functionality in a much more portable
way.

As a first step towards removing VIRTUAL_MEM_MAP, forbid its use with
FLATMEM and panic on systems with large holes in the physical memory
layout that try to run FLATMEM kernels.

Link: https://lkml.kernel.org/r/20201101170454.9567-7-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Meelis Roos <mroos@linux.ee>
Cc: Michael Schmitz <schmitzmic@gmail.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
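The check added by this patch is a single pass over the (ordered) EFI memory map that tracks the largest gap between consecutive memory ranges and refuses to boot a FLATMEM kernel if that gap reaches 1G. The following is a rough user-space sketch of that scan, not the kernel code itself: the toy memmap array, its sizes, and the plain for-loop are illustrative stand-ins for efi_memmap_walk() and its callback; only the gap bookkeeping mirrors find_largest_hole()/verify_gap_absence().

/*
 * Standalone illustration of the largest-hole scan (assumptions noted above).
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_1G (1ULL << 30)

struct range { uint64_t start, end; };

int main(void)
{
	/* hypothetical layout: 2G of memory, a 3G hole, then another 1G */
	struct range memmap[] = {
		{ 0,         2 * SZ_1G },
		{ 5 * SZ_1G, 6 * SZ_1G },
	};
	uint64_t max_gap = 0, last_end = 0;
	unsigned int i;

	/* the scan assumes the map is sorted, as the kernel comment notes */
	for (i = 0; i < sizeof(memmap) / sizeof(memmap[0]); i++) {
		if (memmap[i].start - last_end > max_gap)
			max_gap = memmap[i].start - last_end;
		last_end = memmap[i].end;
	}

	if (max_gap >= SZ_1G)	/* the patch panics at this point */
		printf("Cannot use FLATMEM with %lluMB hole\n",
		       (unsigned long long)(max_gap >> 20));
	else
		printf("largest hole is %lluMB, FLATMEM is fine\n",
		       (unsigned long long)(max_gap >> 20));
	return 0;
}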
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--	arch/ia64/mm/contig.c	48
-rw-r--r--	arch/ia64/mm/init.c	14
2 files changed, 21 insertions(+), 41 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index ba81d8cb0059..bfc4ecd0a2ab 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -19,15 +19,12 @@
 #include <linux/mm.h>
 #include <linux/nmi.h>
 #include <linux/swap.h>
+#include <linux/sizes.h>
 
 #include <asm/meminit.h>
 #include <asm/sections.h>
 #include <asm/mca.h>
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long max_gap;
-#endif
-
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
@@ -166,33 +163,30 @@ find_memory (void)
 	alloc_per_cpu_data();
 }
 
-static void __init virtual_map_init(void)
+static int __init find_largest_hole(u64 start, u64 end, void *arg)
 {
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
-	if (max_gap < LARGE_GAP) {
-		vmem_map = (struct page *) 0;
-	} else {
-		unsigned long map_size;
+	u64 *max_gap = arg;
 
-		/* allocate virtual_mem_map */
+	static u64 last_end = PAGE_OFFSET;
 
-		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
-			sizeof(struct page));
-		VMALLOC_END -= map_size;
-		vmem_map = (struct page *) VMALLOC_END;
-		efi_memmap_walk(create_mem_map_page_table, NULL);
+	/* NOTE: this algorithm assumes efi memmap table is ordered */
 
-		/*
-		 * alloc_node_mem_map makes an adjustment for mem_map
-		 * which isn't compatible with vmem_map.
-		 */
-		NODE_DATA(0)->node_mem_map = vmem_map +
-			find_min_pfn_with_active_regions();
+	if (*max_gap < (start - last_end))
+		*max_gap = start - last_end;
+	last_end = end;
+	return 0;
+}
 
-		printk("Virtual mem_map starts at 0x%p\n", mem_map);
-	}
-#endif /* !CONFIG_VIRTUAL_MEM_MAP */
+static void __init verify_gap_absence(void)
+{
+	unsigned long max_gap;
+
+	/* Forbid FLATMEM if hole is > than 1G */
+	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
+	if (max_gap >= SZ_1G)
+		panic("Cannot use FLATMEM with %ldMB hole\n"
+		      "Please switch over to SPARSEMEM\n",
+		      (max_gap >> 20));
 }
 
 /*
@@ -210,7 +204,7 @@ paging_init (void)
 	max_zone_pfns[ZONE_DMA32] = max_dma;
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
-	virtual_map_init();
+	verify_gap_absence();
 
 	free_area_init(max_zone_pfns);
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ef12e097f318..9b5acf8fb092 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -574,20 +574,6 @@ ia64_pfn_valid (unsigned long pfn)
 }
 EXPORT_SYMBOL(ia64_pfn_valid);
 
-int __init find_largest_hole(u64 start, u64 end, void *arg)
-{
-	u64 *max_gap = arg;
-
-	static u64 last_end = PAGE_OFFSET;
-
-	/* NOTE: this algorithm assumes efi memmap table is ordered */
-
-	if (*max_gap < (start - last_end))
-		*max_gap = start - last_end;
-	last_end = end;
-	return 0;
-}
-
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 int __init register_active_ranges(u64 start, u64 len, int nid)