author     Mike Rapoport <rppt@linux.ibm.com>  2020-06-03 15:57:02 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-03 20:09:43 -0700
commit     3f08a302f533f74ad2e909e7a61274aa7eebc0ab (patch)
tree       fcd44c7f916a14c884e2671e03bbea9f78ffd36d /mm/page_alloc.c
parent     mm: make early_pfn_to_nid() and related definitions close to each other (diff)
mm: remove CONFIG_HAVE_MEMBLOCK_NODE_MAP option
CONFIG_HAVE_MEMBLOCK_NODE_MAP is used to differentiate initialization of node
and zone structures between systems that have a region-to-node mapping in
memblock and those that don't.

Currently all NUMA architectures enable this option, and for non-NUMA systems
we can presume that all memory belongs to node 0, so the compile-time
configuration option is not required.

The remaining few architectures that use DISCONTIGMEM without NUMA are easily
updated to use memblock_add_node() instead of memblock_add() and thus gain a
proper correspondence of memblock regions to NUMA nodes.

Still, free_area_init_node() must have a backward-compatible version because
its semantics with and without CONFIG_HAVE_MEMBLOCK_NODE_MAP differ. Once all
architectures use the new semantics, the entire compatibility layer can be
dropped.

To avoid adding extra runtime memory to store the node id on architectures
that keep memblock but have only a single node, the node id field of
memblock_region is guarded by CONFIG_NEED_MULTIPLE_NODES and the corresponding
accessors presume that in those cases it is always 0.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Hoan Tran <hoan@os.amperecomputing.com>	[arm64]
Acked-by: Catalin Marinas <catalin.marinas@arm.com>	[arm64]
Cc: Baoquan He <bhe@redhat.com>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200412194859.12663-4-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
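The guarded node id described in the last paragraph of the message can be
pictured with a minimal sketch. It assumes the memblock_region layout and
accessor names from <linux/memblock.h>; it illustrates the approach and is not
a hunk from this patch:

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;	/* stored only when more than one node can exist */
#endif
};

#ifdef CONFIG_NEED_MULTIPLE_NODES
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
/* Single-node configurations: nothing to store, every region is node 0. */
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif

On the same principle, the non-NUMA DISCONTIGMEM architectures register their
memory with memblock_add_node(base, size, nid) (nid being 0 on those systems)
rather than memblock_add(base, size), so every memblock region still maps to a
node.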
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  101
1 file changed, 59 insertions(+), 42 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8741ae0828e1..430e35384b78 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -335,7 +335,6 @@ static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
@@ -348,7 +347,6 @@ static bool mirrored_kernelcore __meminitdata;
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
@@ -1499,8 +1497,7 @@ void __free_pages_core(struct page *page, unsigned int order)
__free_pages(page, order);
}
-#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
- defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
+#ifdef CONFIG_NEED_MULTIPLE_NODES
static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
@@ -1542,7 +1539,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
return nid;
}
-#endif
+#endif /* CONFIG_NEED_MULTIPLE_NODES */
#ifdef CONFIG_NODES_SPAN_OTHER_NODES
/* Only safe to use early in boot when initialisation is single-threaded */
@@ -5936,7 +5933,6 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static struct memblock_region *r;
if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
@@ -5952,7 +5948,6 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
return true;
}
}
-#endif
return false;
}
@@ -6585,8 +6580,7 @@ static unsigned long __init zone_absent_pages_in_node(int nid,
return nr_absent;
}
-#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-static inline unsigned long __init zone_spanned_pages_in_node(int nid,
+static inline unsigned long __init compat_zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
unsigned long node_start_pfn,
unsigned long node_end_pfn,
@@ -6605,7 +6599,7 @@ static inline unsigned long __init zone_spanned_pages_in_node(int nid,
return zones_size[zone_type];
}
-static inline unsigned long __init zone_absent_pages_in_node(int nid,
+static inline unsigned long __init compat_zone_absent_pages_in_node(int nid,
unsigned long zone_type,
unsigned long node_start_pfn,
unsigned long node_end_pfn,
@@ -6617,13 +6611,12 @@ static inline unsigned long __init zone_absent_pages_in_node(int nid,
return zholes_size[zone_type];
}
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
static void __init calculate_node_totalpages(struct pglist_data *pgdat,
unsigned long node_start_pfn,
unsigned long node_end_pfn,
unsigned long *zones_size,
- unsigned long *zholes_size)
+ unsigned long *zholes_size,
+ bool compat)
{
unsigned long realtotalpages = 0, totalpages = 0;
enum zone_type i;
@@ -6631,17 +6624,38 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
for (i = 0; i < MAX_NR_ZONES; i++) {
struct zone *zone = pgdat->node_zones + i;
unsigned long zone_start_pfn, zone_end_pfn;
+ unsigned long spanned, absent;
unsigned long size, real_size;
- size = zone_spanned_pages_in_node(pgdat->node_id, i,
- node_start_pfn,
- node_end_pfn,
- &zone_start_pfn,
- &zone_end_pfn,
- zones_size);
- real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
- node_start_pfn, node_end_pfn,
- zholes_size);
+ if (compat) {
+ spanned = compat_zone_spanned_pages_in_node(
+ pgdat->node_id, i,
+ node_start_pfn,
+ node_end_pfn,
+ &zone_start_pfn,
+ &zone_end_pfn,
+ zones_size);
+ absent = compat_zone_absent_pages_in_node(
+ pgdat->node_id, i,
+ node_start_pfn,
+ node_end_pfn,
+ zholes_size);
+ } else {
+ spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
+ node_start_pfn,
+ node_end_pfn,
+ &zone_start_pfn,
+ &zone_end_pfn,
+ zones_size);
+ absent = zone_absent_pages_in_node(pgdat->node_id, i,
+ node_start_pfn,
+ node_end_pfn,
+ zholes_size);
+ }
+
+ size = spanned;
+ real_size = size - absent;
+
if (size)
zone->zone_start_pfn = zone_start_pfn;
else
@@ -6941,10 +6955,8 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
*/
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
-#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
mem_map -= offset;
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
}
#endif
}
@@ -6961,9 +6973,10 @@ static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
#endif
-void __init free_area_init_node(int nid, unsigned long *zones_size,
- unsigned long node_start_pfn,
- unsigned long *zholes_size)
+static void __init __free_area_init_node(int nid, unsigned long *zones_size,
+ unsigned long node_start_pfn,
+ unsigned long *zholes_size,
+ bool compat)
{
pg_data_t *pgdat = NODE_DATA(nid);
unsigned long start_pfn = 0;
@@ -6975,16 +6988,16 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
pgdat->node_id = nid;
pgdat->node_start_pfn = node_start_pfn;
pgdat->per_cpu_nodestats = NULL;
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
- get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
- pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
- (u64)start_pfn << PAGE_SHIFT,
- end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
-#else
- start_pfn = node_start_pfn;
-#endif
+ if (!compat) {
+ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+ pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
+ (u64)start_pfn << PAGE_SHIFT,
+ end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
+ } else {
+ start_pfn = node_start_pfn;
+ }
calculate_node_totalpages(pgdat, start_pfn, end_pfn,
- zones_size, zholes_size);
+ zones_size, zholes_size, compat);
alloc_node_mem_map(pgdat);
pgdat_set_deferred_range(pgdat);
@@ -6992,6 +7005,14 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
free_area_init_core(pgdat);
}
+void __init free_area_init_node(int nid, unsigned long *zones_size,
+ unsigned long node_start_pfn,
+ unsigned long *zholes_size)
+{
+ __free_area_init_node(nid, zones_size, node_start_pfn, zholes_size,
+ true);
+}
+
#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
/*
* Initialize all valid struct pages in the range [spfn, epfn) and mark them
@@ -7075,8 +7096,6 @@ static inline void __init init_unavailable_mem(void)
}
#endif /* !CONFIG_FLAT_NODE_MEM_MAP */
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-
#if MAX_NUMNODES > 1
/*
* Figure out the number of possible node ids.
@@ -7505,8 +7524,8 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
init_unavailable_mem();
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
- free_area_init_node(nid, NULL,
- find_min_pfn_for_node(nid), NULL);
+ __free_area_init_node(nid, NULL,
+ find_min_pfn_for_node(nid), NULL, false);
/* Any memory on that node */
if (pgdat->node_present_pages)
@@ -7571,8 +7590,6 @@ static int __init cmdline_parse_movablecore(char *p)
early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
void adjust_managed_page_count(struct page *page, long count)
{
atomic_long_add(count, &page_zone(page)->managed_pages);