author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-04-19 17:17:29 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>    2008-04-19 17:17:29 +0100
commit    adf6d34e460387ee3e8f1e1875d52bff51212c7d (patch)
tree      88ef100143e6184103a608f82dfd232bf6376eaf /mm
parent    Merge branches 'arm', 'at91', 'ep93xx', 'iop', 'ks8695', 'misc', 'mxc', 'ns9x', 'orion', 'pxa', 'sa1100', 's3c' and 'sparsemem' into devel (diff)
parent    ARM: OMAP2: New DPLL clock framework (diff)
Merge branch 'omap2-upstream' into devel
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c          17
-rw-r--r--  mm/memcontrol.c       26
-rw-r--r--  mm/slub.c              5
-rw-r--r--  mm/sparse-vmemmap.c    8
4 files changed, 43 insertions, 13 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 74c1b6b0b37b..51c9e2c01640 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -401,12 +401,20 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
struct page *page;
unsigned long nr_pages;
+ /*
+ * We want to release as many surplus pages as possible, spread
+ * evenly across all nodes. Iterate across all nodes until we
+ * can no longer free unreserved surplus pages. This occurs when
+ * the nodes with surplus pages have no free pages.
+ */
+ unsigned long remaining_iterations = num_online_nodes();
+
/* Uncommit the reservation */
resv_huge_pages -= unused_resv_pages;
nr_pages = min(unused_resv_pages, surplus_huge_pages);
- while (nr_pages) {
+ while (remaining_iterations-- && nr_pages) {
nid = next_node(nid, node_online_map);
if (nid == MAX_NUMNODES)
nid = first_node(node_online_map);
@@ -424,6 +432,7 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
surplus_huge_pages--;
surplus_huge_pages_node[nid]--;
nr_pages--;
+ remaining_iterations = num_online_nodes();
}
}
}
@@ -671,9 +680,11 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
{
return sprintf(buf,
"Node %d HugePages_Total: %5u\n"
- "Node %d HugePages_Free: %5u\n",
+ "Node %d HugePages_Free: %5u\n"
+ "Node %d HugePages_Surp: %5u\n",
nid, nr_huge_pages_node[nid],
- nid, free_huge_pages_node[nid]);
+ nid, free_huge_pages_node[nid],
+ nid, surplus_huge_pages_node[nid]);
}
/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
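The hunk above is a livelock fix: the loop walks the online nodes round-robin and, without a bound, would spin forever once every node still holding surplus pages has no free pages left. The added remaining_iterations counter permits at most one full fruitless pass before giving up, and is reset whenever a page is actually freed. A minimal userspace sketch of that pattern, with a hypothetical try_free_on_node() standing in for the real free/surplus bookkeeping:

	#include <stdbool.h>

	#define NR_NODES 4

	static unsigned long surplus[NR_NODES] = { 3, 0, 1, 0 };	/* toy per-node state */

	/* Hypothetical stand-in for "free one unreserved surplus page on nid". */
	static bool try_free_on_node(int nid)
	{
		if (!surplus[nid])
			return false;
		surplus[nid]--;
		return true;
	}

	static void free_round_robin(unsigned long nr_pages)
	{
		int nid = 0;
		unsigned long remaining_iterations = NR_NODES;

		while (remaining_iterations-- && nr_pages) {
			nid = (nid + 1) % NR_NODES;	/* next_node()/first_node() wrap */
			if (try_free_on_node(nid)) {
				nr_pages--;
				/* Progress was made: allow another full pass. */
				remaining_iterations = NR_NODES;
			}
		}
	}

	int main(void)
	{
		free_round_robin(10);	/* asks for more than exists, yet terminates */
		return 0;
	}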
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9b648bd63451..2e0bfc93484b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -533,6 +533,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct mem_cgroup_per_zone *mz;
+ if (mem_cgroup_subsys.disabled)
+ return 0;
+
/*
* Should page_cgroup's go to their own slab?
* One could optimize the performance of the charging routine
@@ -665,6 +668,9 @@ void mem_cgroup_uncharge_page(struct page *page)
struct mem_cgroup_per_zone *mz;
unsigned long flags;
+ if (mem_cgroup_subsys.disabled)
+ return;
+
/*
* Check if our page_cgroup is valid
*/
@@ -705,6 +711,9 @@ int mem_cgroup_prepare_migration(struct page *page)
{
struct page_cgroup *pc;
+ if (mem_cgroup_subsys.disabled)
+ return 0;
+
lock_page_cgroup(page);
pc = page_get_page_cgroup(page);
if (pc)
@@ -803,6 +812,9 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
int ret = -EBUSY;
int node, zid;
+ if (mem_cgroup_subsys.disabled)
+ return 0;
+
css_get(&mem->css);
/*
* page reclaim code (kswapd etc..) will move pages between
@@ -966,7 +978,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
struct mem_cgroup_per_node *pn;
struct mem_cgroup_per_zone *mz;
- int zone;
+ int zone, tmp = node;
/*
* This routine is called against possible nodes.
* But it's BUG to call kmalloc() against offline node.
@@ -975,10 +987,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
* never be onlined. It's better to use memory hotplug callback
* function.
*/
- if (node_state(node, N_HIGH_MEMORY))
- pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
- else
- pn = kmalloc(sizeof(*pn), GFP_KERNEL);
+ if (!node_state(node, N_NORMAL_MEMORY))
+ tmp = -1;
+ pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
if (!pn)
return 1;
@@ -1053,6 +1064,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
static int mem_cgroup_populate(struct cgroup_subsys *ss,
struct cgroup *cont)
{
+ if (mem_cgroup_subsys.disabled)
+ return 0;
return cgroup_add_files(cont, ss, mem_cgroup_files,
ARRAY_SIZE(mem_cgroup_files));
}
@@ -1065,6 +1078,9 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct mm_struct *mm;
struct mem_cgroup *mem, *old_mem;
+ if (mem_cgroup_subsys.disabled)
+ return;
+
mm = get_task_mm(p);
if (mm == NULL)
return;
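Two patterns recur in the memcontrol hunks. First, every entry point gains an early return when mem_cgroup_subsys.disabled is set, so a compiled-in but disabled controller costs nothing. Second, alloc_mem_cgroup_per_zone_info() replaces its kmalloc_node()/kmalloc() branch with a single kmalloc_node() call whose node argument falls back to -1 ("no preference") when the target node has no normal memory. A userspace sketch of that second pattern under toy assumptions; alloc_on_node() and node_has_normal_memory() are hypothetical stand-ins for kmalloc_node() and node_state(node, N_NORMAL_MEMORY):

	#include <stdio.h>
	#include <stdlib.h>

	#define ANY_NODE (-1)

	/* Hypothetical allocator in the shape of kmalloc_node(): a node of
	 * -1 means "no placement preference". */
	static void *alloc_on_node(size_t size, int node)
	{
		(void)node;		/* a real NUMA allocator would honor this */
		return malloc(size);
	}

	/* Toy predicate playing the role of node_state(node, N_NORMAL_MEMORY). */
	static int node_has_normal_memory(int node)
	{
		return node == 0;
	}

	static void *alloc_per_node_info(size_t size, int node)
	{
		int tmp = node;

		/* Memoryless node: fall back to "any node" instead of
		 * switching to a different allocator, as the hunk does. */
		if (!node_has_normal_memory(node))
			tmp = ANY_NODE;
		return alloc_on_node(size, tmp);
	}

	int main(void)
	{
		void *pn = alloc_per_node_info(128, 2);

		printf("allocated: %p\n", pn);
		free(pn);
		return 0;
	}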
diff --git a/mm/slub.c b/mm/slub.c
index b72bc98e2dc1..acc975fcc8cc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1470,6 +1470,9 @@ static void *__slab_alloc(struct kmem_cache *s,
void **object;
struct page *new;
+ /* We handle __GFP_ZERO in the caller */
+ gfpflags &= ~__GFP_ZERO;
+
if (!c->page)
goto new_slab;
@@ -2685,7 +2688,7 @@ void kfree(const void *x)
}
EXPORT_SYMBOL(kfree);
-#if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO)
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
static unsigned long count_partial(struct kmem_cache_node *n)
{
unsigned long flags;
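The slub hunk strips __GFP_ZERO before the slow path runs, relying on the convention that the caller zeroes the object after allocation; passing the flag further down would zero whole slab pages needlessly and can trip allocator debugging checks. A userspace sketch of that caller-zeroes convention, with a hypothetical slow_alloc() and a stand-in flag bit:

	#include <stdlib.h>
	#include <string.h>

	#define GFP_ZERO_FLAG 0x8000u	/* stand-in bit for the real __GFP_ZERO */

	/* Hypothetical slow path; like __slab_alloc() after this patch, it
	 * must never see the zeroing flag. */
	static void *slow_alloc(unsigned int gfpflags, size_t size)
	{
		(void)gfpflags;
		return malloc(size);
	}

	static void *slab_alloc_sketch(unsigned int gfpflags, size_t size)
	{
		/* Mask the flag before the slow path, mirroring
		 * "gfpflags &= ~__GFP_ZERO" in the hunk above. */
		void *object = slow_alloc(gfpflags & ~GFP_ZERO_FLAG, size);

		/* Zeroing happens exactly once, here in the caller. */
		if (object && (gfpflags & GFP_ZERO_FLAG))
			memset(object, 0, size);
		return object;
	}

	int main(void)
	{
		free(slab_alloc_sketch(GFP_ZERO_FLAG, 64));
		return 0;
	}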
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cd75b21dd4c3..99c4f36eb8a3 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -76,7 +76,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
pte_t entry;
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
if (!p)
- return 0;
+ return NULL;
entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
set_pte_at(&init_mm, addr, pte, entry);
}
@@ -89,7 +89,7 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
if (pmd_none(*pmd)) {
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
if (!p)
- return 0;
+ return NULL;
pmd_populate_kernel(&init_mm, pmd, p);
}
return pmd;
@@ -101,7 +101,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
if (pud_none(*pud)) {
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
if (!p)
- return 0;
+ return NULL;
pud_populate(&init_mm, pud, p);
}
return pud;
@@ -113,7 +113,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
if (pgd_none(*pgd)) {
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
if (!p)
- return 0;
+ return NULL;
pgd_populate(&init_mm, pgd, p);
}
return pgd;
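The sparse-vmemmap hunks change no behavior: the *_populate() helpers return pointers, so a failed vmemmap_alloc_block() should be reported as NULL rather than the integer literal 0, which only reads as a pointer by accident. A toy illustration of the same fix:

	#include <stddef.h>
	#include <stdlib.h>

	/* Toy analogue of the *_populate() helpers above. */
	static int *populate_slot(void)
	{
		int *p = malloc(sizeof(*p));

		if (!p)
			return NULL;	/* was "return 0;": same value, clearer intent */
		*p = 0;
		return p;
	}

	int main(void)
	{
		free(populate_slot());
		return 0;
	}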