author    Hari Bathini <hbathini@linux.ibm.com>  2020-07-29 17:10:32 +0530
committer Michael Ellerman <mpe@ellerman.id.au>  2020-07-29 23:47:54 +1000
commit    adfefc609e55edc5dce18a68d1526af6d70aaf86 (patch)
tree      e5acaa02a080feb636819e949bf1c00d0fa0244a /arch/powerpc/kernel/prom.c
parent    powerpc/kexec_file: Avoid stomping memory used by special regions (diff)
powerpc/drmem: Make LMB walk a bit more flexible
Currently, numa and prom are the only users of the drmem LMB walk code. Loading kdump with kexec_file also needs to walk the drmem LMBs to set up the usable memory ranges for the kdump kernel, but there are a couple of issues in using the code as is. One, walk_drmem_lmbs() is currently built into the .init section, while kexec_file needs it later. Two, there is no way to pass data to the callback function, or for the callback to error out on certain conditions. Fix that by moving the drmem LMB walk code out of the .init section, adding scope to pass data to the callback function, and bailing out of the walk when the callback returns an error.

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Tested-by: Pingfan Liu <piliu@redhat.com>
Reviewed-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/159602282727.575379.3979857013827701828.stgit@hbathini
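A minimal sketch of the new callback contract, assuming the post-patch walk_drmem_lmbs() prototype (int return, opaque void *data passed through to the callback): a non-zero return from the callback aborts the walk and is propagated to the caller. The counting callback and wrapper below are hypothetical illustrations, not kernel code; the real prototypes live in arch/powerpc/include/asm/drmem.h.

    #include <linux/of.h>
    #include <asm/drmem.h>

    /* Hypothetical callback: tallies assigned LMBs via the new data argument. */
    static int count_assigned_lmbs(struct drmem_lmb *lmb, const __be32 **usm,
                                   void *data)
    {
            unsigned int *count = data;

            if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
                    return 0;       /* skip this LMB, keep walking */

            (*count)++;
            return 0;               /* non-zero would abort the walk */
    }

    /* Usable outside __init now that the walk code has left the .init section. */
    static int count_lmbs(struct device_node *dn, unsigned int *count)
    {
            *count = 0;
            return walk_drmem_lmbs(dn, count, count_assigned_lmbs);
    }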
Diffstat (limited to 'arch/powerpc/kernel/prom.c')
-rw-r--r--  arch/powerpc/kernel/prom.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 5a05c5e993fd..d8a2fb87ba0c 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -470,8 +470,9 @@ static bool validate_mem_limit(u64 base, u64 *size)
  * This contains a list of memory blocks along with NUMA affinity
  * information.
  */
-static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
-                                        const __be32 **usm)
+static int __init early_init_drmem_lmb(struct drmem_lmb *lmb,
+                                       const __be32 **usm,
+                                       void *data)
 {
         u64 base, size;
         int is_kexec_kdump = 0, rngs;
@@ -486,7 +487,7 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
          */
         if ((lmb->flags & DRCONF_MEM_RESERVED) ||
             !(lmb->flags & DRCONF_MEM_ASSIGNED))
-                return;
+                return 0;
 
         if (*usm)
                 is_kexec_kdump = 1;
@@ -501,7 +502,7 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
                  */
                 rngs = dt_mem_next_cell(dt_root_size_cells, usm);
                 if (!rngs) /* there are no (base, size) duple */
-                        return;
+                        return 0;
         }
 
         do {
@@ -526,6 +527,8 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
                 if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
                         memblock_mark_hotplug(base, size);
         } while (--rngs);
+
+        return 0;
 }
 #endif /* CONFIG_PPC_PSERIES */
 
@@ -536,7 +539,7 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node,
 #ifdef CONFIG_PPC_PSERIES
         if (depth == 1 &&
             strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) {
-                walk_drmem_lmbs_early(node, early_init_drmem_lmb);
+                walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);
                 return 0;
         }
 #endif
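The bail-out behaviour is what the kexec_file use case relies on: a callback that runs after boot can return a negative errno to stop the walk early. A hypothetical sketch, assuming the same post-patch prototype; the bookkeeping struct and the -ENOSPC condition below are illustrative only, not the kernel's actual kexec_file code.

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <asm/drmem.h>

    /* Hypothetical bookkeeping for collecting LMB base addresses. */
    struct lmb_buf {
            u64 *base;
            unsigned int idx, max;
    };

    /* A negative return aborts the walk; walk_drmem_lmbs() hands it back. */
    static int collect_lmb_bases(struct drmem_lmb *lmb, const __be32 **usm,
                                 void *data)
    {
            struct lmb_buf *buf = data;

            if (buf->idx >= buf->max)
                    return -ENOSPC;

            buf->base[buf->idx++] = lmb->base_addr;
            return 0;
    }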