author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2013-05-01 08:47:44 -0700
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2013-05-01 08:47:44 -0700
commit     bf61c8840efe60fd8f91446860b63338fb424158 (patch)
tree       7a71832407a4f0d6346db773343f4c3ae2257b19 /mm/bootmem.c
parent     Input: wacom - fix "can not retrieve extra class descriptor" for DTH2242 (diff)
parent     Input: trackpoint - Optimize trackpoint init to use power-on reset (diff)
Merge branch 'next' into for-linus
Prepare first set of updates for 3.10 merge window.
Diffstat (limited to 'mm/bootmem.c')
-rw-r--r--  mm/bootmem.c  121
1 file changed, 62 insertions(+), 59 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 434be4ae7a04..2b0bcb019ec2 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -147,21 +147,21 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
/*
* free_bootmem_late - free bootmem pages directly to page allocator
- * @addr: starting address of the range
+ * @addr: starting physical address of the range
* @size: size of the range in bytes
*
* This is only useful when the bootmem allocator has already been torn
* down, but we are still initializing the system. Pages are given directly
* to the page allocator, no bootmem metadata is updated because it is gone.
*/
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
+void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
unsigned long cursor, end;
- kmemleak_free_part(__va(addr), size);
+ kmemleak_free_part(__va(physaddr), size);
- cursor = PFN_UP(addr);
- end = PFN_DOWN(addr + size);
+ cursor = PFN_UP(physaddr);
+ end = PFN_DOWN(physaddr + size);
for (; cursor < end; cursor++) {
__free_pages_bootmem(pfn_to_page(cursor), 0);
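Annotation (not part of the diff): the PFN_UP/PFN_DOWN pair above is what keeps partial pages out of the freed range. A minimal sketch of the rounding, assuming the standard definitions from <linux/pfn.h>; the concrete numbers are illustrative only:

	#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
	#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

	/* With 4 KiB pages, physaddr = 0x1800 and size = 0x2800:
	 *   cursor = PFN_UP(0x1800)            = 2  (first whole page)
	 *   end    = PFN_DOWN(0x1800 + 0x2800) = 4  (one past the last whole page)
	 * Pages 2 and 3 are freed; the partial pages at both ends stay as they are.
	 */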
@@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
while (start < end) {
unsigned long *map, idx, vec;
+ unsigned shift;
map = bdata->node_bootmem_map;
idx = start - bdata->node_min_pfn;
+ shift = idx & (BITS_PER_LONG - 1);
+ /*
+ * vec holds at most BITS_PER_LONG map bits,
+ * bit 0 corresponds to start.
+ */
vec = ~map[idx / BITS_PER_LONG];
+
+ if (shift) {
+ vec >>= shift;
+ if (end - start >= BITS_PER_LONG)
+ vec |= ~map[idx / BITS_PER_LONG + 1] <<
+ (BITS_PER_LONG - shift);
+ }
/*
* If we have a properly aligned and fully unreserved
* BITS_PER_LONG block of pages in front of us, free
@@ -198,27 +211,21 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
int order = ilog2(BITS_PER_LONG);
__free_pages_bootmem(pfn_to_page(start), order);
- fixup_zone_present_pages(page_to_nid(pfn_to_page(start)),
- start, start + BITS_PER_LONG);
count += BITS_PER_LONG;
start += BITS_PER_LONG;
} else {
- unsigned long off = 0;
+ unsigned long cur = start;
- vec >>= start & (BITS_PER_LONG - 1);
- while (vec) {
+ start = ALIGN(start + 1, BITS_PER_LONG);
+ while (vec && cur != start) {
if (vec & 1) {
- page = pfn_to_page(start + off);
+ page = pfn_to_page(cur);
__free_pages_bootmem(page, 0);
- fixup_zone_present_pages(
- page_to_nid(page),
- start + off, start + off + 1);
count++;
}
vec >>= 1;
- off++;
+ ++cur;
}
- start = ALIGN(start + 1, BITS_PER_LONG);
}
}
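Annotation: the two-word splice introduced above is the subtle part. A worked example, assuming BITS_PER_LONG == 64 and node_min_pfn == 0 so that idx == start:

	/* start = 70  ->  shift = 70 & 63 = 6, so vec is spliced from two words:
	 *   vec  = ~map[1] >> 6;          bits for pfns  70..127 -> vec bits 0..57
	 *   vec |= ~map[2] << (64 - 6);   bits for pfns 128..133 -> vec bits 58..63
	 * Bit 0 of vec therefore always corresponds to pfn 'start'. When start
	 * is word-aligned (shift == 0) and vec == ~0UL, the fast path releases
	 * all 64 pages as one buddy block:
	 *   __free_pages_bootmem(pfn_to_page(start), ilog2(64));   order 6
	 */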
@@ -226,17 +233,30 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
pages = bdata->node_low_pfn - bdata->node_min_pfn;
pages = bootmem_bootmap_pages(pages);
count += pages;
- while (pages--) {
- fixup_zone_present_pages(page_to_nid(page),
- page_to_pfn(page), page_to_pfn(page) + 1);
+ while (pages--)
__free_pages_bootmem(page++, 0);
- }
bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
return count;
}
+static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+{
+ struct zone *z;
+
+ /*
+ * In free_area_init_core(), highmem zone's managed_pages is set to
+ * present_pages, and bootmem allocator doesn't allocate from highmem
+ * zones. So there's no need to recalculate managed_pages because all
+ * highmem pages will be managed by the buddy system. Here highmem
+ * zone also includes highmem movable zone.
+ */
+ for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+ if (!is_highmem(z))
+ z->managed_pages = 0;
+}
+
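Annotation: zeroing managed_pages here only balances out because the pages are re-counted as they reach the buddy allocator. A sketch of that counterpart, under the assumption that this merge carries the managed_pages accounting added to __free_pages_bootmem() elsewhere in the series (it is not shown in this diff):

	void __init __free_pages_bootmem(struct page *page, unsigned int order)
	{
		...
		/* re-accumulate what reset_node_lowmem_managed_pages() cleared */
		page_zone(page)->managed_pages += 1UL << order;
		...
	}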
/**
* free_all_bootmem_node - release a node's free pages to the buddy allocator
* @pgdat: node to be released
@@ -246,6 +266,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
+ reset_node_lowmem_managed_pages(pgdat);
return free_all_bootmem_core(pgdat->bdata);
}
@@ -258,6 +279,10 @@ unsigned long __init free_all_bootmem(void)
{
unsigned long total_pages = 0;
bootmem_data_t *bdata;
+ struct pglist_data *pgdat;
+
+ for_each_online_pgdat(pgdat)
+ reset_node_lowmem_managed_pages(pgdat);
list_for_each_entry(bdata, &bdata_list, list)
total_pages += free_all_bootmem_core(bdata);
@@ -385,21 +410,21 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
/**
* free_bootmem - mark a page range as usable
- * @addr: starting address of the range
+ * @addr: starting physical address of the range
* @size: size of the range in bytes
*
* Partial pages will be considered reserved and left as they are.
*
* The range must be contiguous but may span node boundaries.
*/
-void __init free_bootmem(unsigned long addr, unsigned long size)
+void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
unsigned long start, end;
- kmemleak_free_part(__va(addr), size);
+ kmemleak_free_part(__va(physaddr), size);
- start = PFN_UP(addr);
- end = PFN_DOWN(addr + size);
+ start = PFN_UP(physaddr);
+ end = PFN_DOWN(physaddr + size);
mark_bootmem(start, end, 0, 0);
}
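Annotation: the parameter rename makes the contract explicit; the argument is a physical address. A hypothetical caller sketch ('buf' and 'len' are made-up names), where __pa() performs the virtual-to-physical conversion this API expects:

	void *buf = alloc_bootmem(len);		/* returns a kernel virtual address */
	...
	free_bootmem(__pa(buf), len);		/* takes a physical address; partial
						   pages stay reserved */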
@@ -447,12 +472,6 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
return mark_bootmem(start, end, 1, flags);
}
-int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
- int flags)
-{
- return reserve_bootmem(phys, len, flags);
-}
-
static unsigned long __init align_idx(struct bootmem_data *bdata,
unsigned long idx, unsigned long step)
{
@@ -583,27 +602,6 @@ find_block:
return NULL;
}
-static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
- unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc(size, GFP_NOWAIT);
-
-#ifdef CONFIG_HAVE_ARCH_BOOTMEM
- {
- bootmem_data_t *p_bdata;
-
- p_bdata = bootmem_arch_preferred_node(bdata, size, align,
- goal, limit);
- if (p_bdata)
- return alloc_bootmem_bdata(p_bdata, size, align,
- goal, limit);
- }
-#endif
- return NULL;
-}
-
static void * __init alloc_bootmem_core(unsigned long size,
unsigned long align,
unsigned long goal,
@@ -612,9 +610,8 @@ static void * __init alloc_bootmem_core(unsigned long size,
bootmem_data_t *bdata;
void *region;
- region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
- if (region)
- return region;
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc(size, GFP_NOWAIT);
list_for_each_entry(bdata, &bdata_list, list) {
if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
@@ -712,11 +709,9 @@ void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
{
void *ptr;
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc(size, GFP_NOWAIT);
again:
- ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size,
- align, goal, limit);
- if (ptr)
- return ptr;
/* do not panic in alloc_bootmem_bdata() */
if (limit && goal + size > limit)
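Annotation: with alloc_arch_preferred_bootmem() gone, its slab fallback is open-coded at the top of each allocation path instead. The pattern, expressed as a hypothetical helper for illustration only (the diff deliberately does not add one):

	static void * __init bootmem_late_fallback(unsigned long size)
	{
		/* A bootmem call after slab is up is a caller bug: warn once
		 * and satisfy it from slab. GFP_NOWAIT because this can run
		 * before sleeping is allowed. */
		if (WARN_ON_ONCE(slab_is_available()))
			return kzalloc(size, GFP_NOWAIT);
		return NULL;	/* slab not up yet: proceed with bootmem */
	}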
@@ -838,6 +833,14 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
+void * __init __alloc_bootmem_low_nopanic(unsigned long size,
+ unsigned long align,
+ unsigned long goal)
+{
+ return ___alloc_bootmem_nopanic(size, align, goal,
+ ARCH_LOW_ADDRESS_LIMIT);
+}
+
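Annotation: the new _nopanic variant mirrors __alloc_bootmem_low() but returns NULL on failure instead of panicking, so callers can degrade gracefully. A hypothetical caller sketch ('low_size' is a made-up name; goal 0 means no preferred address):

	void *base = __alloc_bootmem_low_nopanic(low_size, PAGE_SIZE, 0);
	if (!base)
		pr_warn("low memory allocation failed, falling back\n");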
/**
* __alloc_bootmem_low_node - allocate low boot memory from a specific node
* @pgdat: node to allocate from