Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 57
1 file changed, 38 insertions(+), 19 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8d088371196a..3974fd81d27c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -42,13 +42,13 @@
* MCD - HACK: Find somewhere to initialize this EARLY, or make this
* initializer cleaner
*/
-nodemask_t node_online_map = { { [0] = 1UL } };
+nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
-nodemask_t node_possible_map = NODE_MASK_ALL;
+nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
-struct pglist_data *pgdat_list;
-unsigned long totalram_pages;
-unsigned long totalhigh_pages;
+struct pglist_data *pgdat_list __read_mostly;
+unsigned long totalram_pages __read_mostly;
+unsigned long totalhigh_pages __read_mostly;
long nr_swap_pages;
/*
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(nr_swap_pages);
* Used by page_zone() to look up the address of the struct zone whose
* id is encoded in the upper bits of page->flags
*/
-struct zone *zone_table[1 << ZONETABLE_SHIFT];
+struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
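The variables tagged above are initialized at boot and then read in very hot paths but almost never written; __read_mostly segregates them into their own data section so a frequently written variable cannot land on the same cache line and cause false sharing. A minimal sketch of the annotation, assuming the common section-attribute definition from the arch cache headers:

    #define __read_mostly __attribute__((__section__(".data.read_mostly")))

Architectures without such a section can define __read_mostly as empty, which keeps the annotation harmless.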
@@ -329,7 +329,7 @@ static inline void free_pages_check(const char *function, struct page *page)
1 << PG_writeback )))
bad_page(function, page);
if (PageDirty(page))
- ClearPageDirty(page);
+ __ClearPageDirty(page);
}
/*
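free_pages_check() runs on a page that is being returned to the allocator, so no other user can observe its flags; the plain __ClearPageDirty() variant therefore suffices and skips the atomic bit operation. A sketch of the distinction, assuming the usual page-flags macro pattern from include/linux/page-flags.h:

    #define ClearPageDirty(page)    clear_bit(PG_dirty, &(page)->flags)    /* atomic */
    #define __ClearPageDirty(page)  __clear_bit(PG_dirty, &(page)->flags)  /* non-atomic */

On SMP x86, clear_bit() carries a LOCK prefix; __clear_bit() is an ordinary read-modify-write, which is safe here because the freed page is exclusively owned.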
@@ -806,11 +806,14 @@ __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
classzone_idx = zone_idx(zones[0]);
restart:
- /* Go through the zonelist once, looking for a zone with enough free */
+ /*
+ * Go through the zonelist once, looking for a zone with enough free.
+ * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+ */
for (i = 0; (z = zones[i]) != NULL; i++) {
int do_reclaim = should_reclaim_zone(z, gfp_mask);
- if (!cpuset_zone_allowed(z))
+ if (!cpuset_zone_allowed(z, __GFP_HARDWALL))
continue;
/*
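cpuset_zone_allowed() gains the allocation's gfp_mask so it can tell hardwalled requests apart from ones allowed to fall back to a less restrictive enclosing cpuset (the kernel/cpuset.c comment cited above spells out the policy). This first pass intentionally passes __GFP_HARDWALL to try the task's own cpuset before anything else. The matching prototype change presumably looks like this (a sketch; the actual include/linux/cpuset.h hunk is not part of this diff):

    -extern int cpuset_zone_allowed(struct zone *z);
    +extern int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask);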
@@ -845,6 +848,7 @@ zone_reclaim_retry:
*
* This is the last chance, in general, before the goto nopage.
* Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+ * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
*/
for (i = 0; (z = zones[i]) != NULL; i++) {
if (!zone_watermark_ok(z, order, z->pages_min,
@@ -852,7 +856,7 @@ zone_reclaim_retry:
gfp_mask & __GFP_HIGH))
continue;
- if (wait && !cpuset_zone_allowed(z))
+ if (wait && !cpuset_zone_allowed(z, gfp_mask))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
@@ -867,7 +871,7 @@ zone_reclaim_retry:
if (!(gfp_mask & __GFP_NOMEMALLOC)) {
/* go through the zonelist yet again, ignoring mins */
for (i = 0; (z = zones[i]) != NULL; i++) {
- if (!cpuset_zone_allowed(z))
+ if (!cpuset_zone_allowed(z, gfp_mask))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
if (page)
@@ -903,7 +907,7 @@ rebalance:
gfp_mask & __GFP_HIGH))
continue;
- if (!cpuset_zone_allowed(z))
+ if (!cpuset_zone_allowed(z, gfp_mask))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
@@ -922,7 +926,7 @@ rebalance:
classzone_idx, 0, 0))
continue;
- if (!cpuset_zone_allowed(z))
+ if (!cpuset_zone_allowed(z, __GFP_HARDWALL))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
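Read together, the call sites fall into two groups; the summary below is an interpretation of this diff, not quoted kernel/cpuset.c text:

    /*
     * cpuset_zone_allowed(z, __GFP_HARDWALL): the optimistic first pass
     *     and the final pass above - confine the attempt to the task's
     *     own cpuset.
     * cpuset_zone_allowed(z, gfp_mask): the intermediate blocking
     *     passes - a GFP_KERNEL request may also be satisfied from a
     *     less restrictive enclosing cpuset before the allocation is
     *     failed, and GFP_ATOMIC (!wait) skips the check entirely.
     */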
@@ -1130,19 +1134,20 @@ EXPORT_SYMBOL(nr_pagecache);
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif
-void __get_page_state(struct page_state *ret, int nr)
+void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
int cpu = 0;
memset(ret, 0, sizeof(*ret));
+ cpus_and(*cpumask, *cpumask, cpu_online_map);
- cpu = first_cpu(cpu_online_map);
+ cpu = first_cpu(*cpumask);
while (cpu < NR_CPUS) {
unsigned long *in, *out, off;
in = (unsigned long *)&per_cpu(page_states, cpu);
- cpu = next_cpu(cpu, cpu_online_map);
+ cpu = next_cpu(cpu, *cpumask);
if (cpu < NR_CPUS)
prefetch(&per_cpu(page_states, cpu));
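__get_page_state() now sums per-CPU counters over a caller-supplied cpumask instead of unconditionally over cpu_online_map; the cpus_and() up front keeps offline CPUs out of the walk. A minimal sketch of the resulting iteration pattern (names other than the cpumask primitives are hypothetical):

    cpumask_t mask = node_to_cpumask(nid);  /* nid: some node id */
    int cpu;

    cpus_and(mask, mask, cpu_online_map);   /* drop offline CPUs */
    for (cpu = first_cpu(mask); cpu < NR_CPUS; cpu = next_cpu(cpu, mask))
            /* per_cpu(page_states, cpu) is accumulated here */;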
@@ -1153,19 +1158,33 @@ void __get_page_state(struct page_state *ret, int nr)
}
}
+void get_page_state_node(struct page_state *ret, int node)
+{
+ int nr;
+ cpumask_t mask = node_to_cpumask(node);
+
+ nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
+ nr /= sizeof(unsigned long);
+
+ __get_page_state(ret, nr+1, &mask);
+}
+
void get_page_state(struct page_state *ret)
{
int nr;
+ cpumask_t mask = CPU_MASK_ALL;
nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
nr /= sizeof(unsigned long);
- __get_page_state(ret, nr + 1);
+ __get_page_state(ret, nr + 1, &mask);
}
void get_full_page_state(struct page_state *ret)
{
- __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
+ cpumask_t mask = CPU_MASK_ALL;
+
+ __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}
unsigned long __read_page_state(unsigned long offset)
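The offsetof() arithmetic counts how many unsigned long members precede GET_PAGE_STATE_LAST in struct page_state, so nr+1 sums everything up to and including that member while leaving the remaining full-state-only counters untouched. A worked example with a hypothetical three-field layout:

    struct page_state_demo {                 /* illustrative only */
            unsigned long nr_dirty;          /* offset 0 */
            unsigned long nr_writeback;      /* offset 1 * sizeof(long) */
            unsigned long nr_unstable;       /* offset 2 * sizeof(long) */
    };
    /* offsetof(struct page_state_demo, nr_unstable) / sizeof(unsigned long) == 2,
     * so passing nr+1 == 3 copies all three members. */

The new get_page_state_node() reuses exactly this count but restricts the mask to node_to_cpumask(node), giving per-node totals of the same leading counters.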
@@ -1909,7 +1928,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
zone->nr_scan_inactive = 0;
zone->nr_active = 0;
zone->nr_inactive = 0;
- atomic_set(&zone->reclaim_in_progress, -1);
+ atomic_set(&zone->reclaim_in_progress, 0);
if (!size)
continue;
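Starting reclaim_in_progress at 0 rather than -1 suggests the field is meant as a plain count of tasks currently reclaiming in the zone, so the inc/dec pairs in the reclaim path balance against a sensible idle value. A sketch of the bracketing pattern (the helper name is a hypothetical stand-in for the real work in mm/vmscan.c):

    atomic_inc(&zone->reclaim_in_progress);
    do_zone_reclaim_work(zone);             /* hypothetical reclaim body */
    atomic_dec(&zone->reclaim_in_progress);

With the old -1 initializer, a zone with one active reclaimer would have read as 0, so any test of the form atomic_read(&zone->reclaim_in_progress) > 0 would presumably have missed it.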