author		Mike Rapoport <rppt@linux.vnet.ibm.com>	2018-10-30 15:10:01 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-31 08:54:17 -0700
commit		2f770806fd2c3db9616965e57ba60d80f43c827d (patch)
tree		b5b7687c0a32a69f46fde6b649f6105925e06b1f /mm/memblock.c
parent		memblock: stop using implicit alignment to SMP_CACHE_BYTES (diff)
download	linux-dev-2f770806fd2c3db9616965e57ba60d80f43c827d.tar.xz
		linux-dev-2f770806fd2c3db9616965e57ba60d80f43c827d.zip
mm/memblock.c: warn if zero alignment was requested
After updating all memblock users to explicitly specify SMP_CACHE_BYTES
alignment rather than use 0, it is still possible that uncovered users may
sneak in.  Add a WARN_ON_ONCE for such cases.

[sfr@canb.auug.org.au: use dump_stack() instead of WARN_ON_ONCE for the alignment checks]
  Link: http://lkml.kernel.org/r/20181016131927.6ceba6ab@canb.auug.org.au
[akpm@linux-foundation.org: add apologetic comment]
Link: http://lkml.kernel.org/r/20181011060850.GA19822@rapoport-lnx
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
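For context, a minimal sketch of what the change means for callers (not part of this commit; the structure name and size are made up for illustration, and it assumes the memblock_alloc(size, align) helper as named after this series' rename). Callers that used to pass an alignment of 0 are expected to spell out SMP_CACHE_BYTES; if a zero alignment still slips through, the check added below prints a stack trace and falls back to SMP_CACHE_BYTES.

	/* hypothetical caller, illustrative only */
	struct my_table *tbl;

	/* before: 0 relied on the old implicit SMP_CACHE_BYTES alignment */
	tbl = memblock_alloc(sizeof(*tbl), 0);

	/* after: the default cache-line alignment is stated explicitly;
	 * a caller still passing 0 now triggers dump_stack() and gets the
	 * same SMP_CACHE_BYTES fallback
	 */
	tbl = memblock_alloc(sizeof(*tbl), SMP_CACHE_BYTES);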
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--	mm/memblock.c	11	
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 839531133816..7df468c8ebc8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1247,6 +1247,12 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 {
 	phys_addr_t found;
 
+	if (!align) {
+		/* Can't use WARNs this early in boot on powerpc */
+		dump_stack();
+		align = SMP_CACHE_BYTES;
+	}
+
 	found = memblock_find_in_range_node(size, align, start, end, nid,
 					    flags);
 	if (found && !memblock_reserve(found, size)) {
@@ -1369,6 +1375,11 @@ static void * __init memblock_alloc_internal(
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, nid);
 
+	if (!align) {
+		dump_stack();
+		align = SMP_CACHE_BYTES;
+	}
+
 	if (max_addr > memblock.current_limit)
 		max_addr = memblock.current_limit;
 again: