author		Ingo Molnar <mingo@kernel.org>	2017-10-20 13:06:52 +0200
committer	Ingo Molnar <mingo@kernel.org>	2017-10-20 13:06:52 +0200
commit		967535223f9a8d95c187a8728480b569164cd4f4 (patch)
tree		b68a403701555a747b25c903d8136d86a1a1c133 /mm/sparse.c
parent		mm, x86/mm: Fix performance regression in get_user_pages_fast() (diff)
parent		x86/mm: Limit mmap() of /dev/mem to valid physical addresses (diff)
download	linux-dev-967535223f9a8d95c187a8728480b569164cd4f4.tar.xz
		linux-dev-967535223f9a8d95c187a8728480b569164cd4f4.zip
Merge branch 'x86/urgent' into x86/mm, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--	mm/sparse.c	12
1 file changed, 4 insertions, 8 deletions
diff --git a/mm/sparse.c b/mm/sparse.c
index 7b4be3fd5cac..83b3bf6461af 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -65,14 +65,10 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 					sizeof(struct mem_section);
 
-	if (slab_is_available()) {
-		if (node_state(nid, N_HIGH_MEMORY))
-			section = kzalloc_node(array_size, GFP_KERNEL, nid);
-		else
-			section = kzalloc(array_size, GFP_KERNEL);
-	} else {
+	if (slab_is_available())
+		section = kzalloc_node(array_size, GFP_KERNEL, nid);
+	else
 		section = memblock_virt_alloc_node(array_size, nid);
-	}
 
 	return section;
 }
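
The hunk above drops the N_HIGH_MEMORY special case: once the slab allocator is up, kzalloc_node() can be used for every node, since the allocator already falls back to a nearby node when the requested node has no memory. Below is a minimal userspace sketch of the resulting control flow, with hypothetical calloc-based stand-ins for slab_is_available(), kzalloc_node() and memblock_virt_alloc_node() (not the kernel implementations), and an example SECTIONS_PER_ROOT value:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define SECTIONS_PER_ROOT 256		/* example value, not the kernel's */
#define GFP_KERNEL 0

struct mem_section { unsigned long section_mem_map; };

static bool slab_is_available(void) { return true; }	/* stub */

/* stub: the real kzalloc_node() falls back to a nearby node if nid has no memory */
static void *kzalloc_node(size_t size, int flags, int nid)
{
	(void)flags; (void)nid;
	return calloc(1, size);
}

static void *memblock_virt_alloc_node(size_t size, int nid)	/* stub */
{
	(void)nid;
	return calloc(1, size);
}

static struct mem_section *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
					sizeof(struct mem_section);

	/* single call site for the slab case: the node check is no longer needed */
	if (slab_is_available())
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = memblock_virt_alloc_node(array_size, nid);

	return section;
}

int main(void)
{
	struct mem_section *root = sparse_index_alloc(0);

	printf("allocated root section array: %p\n", (void *)root);
	free(root);
	return 0;
}
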
@@ -630,7 +626,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 	unsigned long pfn;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
-		unsigned long section_nr = pfn_to_section_nr(start_pfn);
+		unsigned long section_nr = pfn_to_section_nr(pfn);
 		struct mem_section *ms;
 
 		/* onlining code should never touch invalid ranges */
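
The second hunk fixes a loop that computed the section number from start_pfn on every iteration, so only the first section of the range was ever looked up. Below is a minimal, self-contained userspace sketch showing the difference; PFN_SECTION_SHIFT and the pfn range are example values, and pfn_to_section_nr() here only mirrors the kernel's pfn >> PFN_SECTION_SHIFT:

#include <stdio.h>

#define PFN_SECTION_SHIFT 15			/* example: 128 MiB sections with 4 KiB pages */
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)

static unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}

static void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long buggy = pfn_to_section_nr(start_pfn);	/* old code: same value every pass */
		unsigned long fixed = pfn_to_section_nr(pfn);		/* fixed: walks every section */

		printf("pfn %#lx -> section %lu (old code would use %lu)\n",
		       pfn, fixed, buggy);
	}
}

int main(void)
{
	/* Online a range spanning four sections; the fixed expression visits sections 0..3. */
	online_mem_sections(0, 4 * PAGES_PER_SECTION);
	return 0;
}
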