author     Bob Picco <bob.picco@hp.com>      2007-01-30 02:11:09 -0800
committer  Tony Luck <tony.luck@intel.com>   2007-02-05 15:07:47 -0800
commit     139b830477ccdca21b68c40f9a83ec327e65eb56
tree       0aab2140315579525dfef89189b9bea5033af2ba
parent     [IA64] Enable SWIOTLB only when needed
[IA64] register memory ranges in a consistent manner
While pursuing an unrelated issue with 64MB granules, I noticed a problem caused by inconsistent use of add_active_range. There doesn't appear to be any reason why FLATMEM and DISCONTIGMEM should register memory with add_active_range through different code, so I've changed this into a common implementation.

The other subtle issue fixed by this patch is that count_node_pages called add_active_range before granule alignment was performed. We were lucky with 16MB granules, but not so with 64MB granules: count_node_pages has reserved regions filtered out, and as a consequence the linked kernel text and data aren't covered by its calls, so the linked kernel regions were never reported to add_active_range. This resulted in free_initmem causing numerous bad_page reports. That can't happen with this patch, because all known memory regions are now reported by register_active_ranges.

Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Acked-by: Simon Horman <horms@verge.net.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
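The fix hinges on the new call site in find_memory() below: efi_memmap_walk(register_active_ranges, NULL) hands every known memory range to one callback, so both memory models register memory through the same path. As a rough illustration of that walk/callback shape, here is a minimal userspace C sketch; walk_memmap(), memmap[] and print_range() are hypothetical stand-ins, not the kernel's efi_memmap_walk() API.

/*
 * Minimal userspace sketch of the walk/callback pattern the patch relies
 * on: a walker hands each memory range to a callback, and a non-zero
 * return stops the walk.  walk_memmap(), memmap[] and print_range() are
 * hypothetical stand-ins, not the kernel's efi_memmap_walk() API.
 */
#include <stdio.h>

typedef int (*range_cb)(unsigned long start, unsigned long end, void *arg);

struct desc { unsigned long start, end; };	/* one usable range */

static const struct desc memmap[] = {		/* fake memory map */
	{ 0x00100000, 0x00800000 },
	{ 0x01000000, 0x04000000 },
};

static void walk_memmap(range_cb cb, void *arg)
{
	for (size_t i = 0; i < sizeof(memmap) / sizeof(memmap[0]); i++)
		if (cb(memmap[i].start, memmap[i].end, arg))
			break;			/* callback asked to stop */
}

static int print_range(unsigned long start, unsigned long end, void *arg)
{
	(void)arg;				/* no per-walk state needed */
	printf("active range: [%#lx, %#lx)\n", start, end);
	return 0;				/* keep walking */
}

int main(void)
{
	walk_memmap(print_range, NULL);
	return 0;
}

Note that register_active_ranges in the patch always returns 0, so the walk visits every range.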
-rw-r--r--  arch/ia64/mm/discontig.c     4
-rw-r--r--  arch/ia64/mm/init.c         19
-rw-r--r--  include/asm-ia64/meminit.h   3
3 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d3edb12f3cf9..999cefd2b226 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -473,6 +473,9 @@ void __init find_memory(void)
node_clear(node, memory_less_mask);
mem_data[node].min_pfn = ~0UL;
}
+
+ efi_memmap_walk(register_active_ranges, NULL);
+
/*
* Initialize the boot memory maps in reverse order since that's
* what the bootmem allocator expects
@@ -660,7 +663,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
{
unsigned long end = start + len;

- add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
mem_data[node].num_physpages += len >> PAGE_SHIFT;
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657f..8b7599808dd5 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
+#include <linux/kexec.h>

#include <asm/a.out.h>
#include <asm/dma.h>
@@ -595,13 +596,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
return 0;
}

+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
- add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+ int nid = paddr_to_nid(__pa(start));
+
+ if (nid < 0)
+ nid = 0;
+#ifdef CONFIG_KEXEC
+ if (start > crashk_res.start && start < crashk_res.end)
+ start = crashk_res.end;
+ if (end > crashk_res.start && end < crashk_res.end)
+ end = crashk_res.start;
+#endif
+
+ if (start < end)
+ add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+ __pa(end) >> PAGE_SHIFT);
return 0;
}
-#endif /* CONFIG_VIRTUAL_MEM_MAP */

static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
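The CONFIG_KEXEC clamping in the hunk above trims a candidate range so it never overlaps the crash-kernel reservation, and drops it entirely when nothing is left. Here is a minimal standalone sketch of that same trim-or-skip logic under hypothetical names; struct range, register_range() and the addresses are stand-ins for the kernel's crashk_res handling, not the kernel code itself.

/*
 * Standalone sketch of the crashkernel clamping in the hunk above:
 * trim a candidate range so it never overlaps a reserved window, and
 * skip it entirely if nothing is left.  struct range, register_range()
 * and the addresses below are hypothetical, not the kernel's crashk_res.
 */
#include <stdio.h>

struct range { unsigned long start, end; };	/* half-open [start, end) */

static void register_range(unsigned long start, unsigned long end,
			   const struct range *reserved)
{
	/* Same two tests as the patch: pull each edge out of the window. */
	if (start > reserved->start && start < reserved->end)
		start = reserved->end;
	if (end > reserved->start && end < reserved->end)
		end = reserved->start;

	if (start < end)
		printf("register [%#lx, %#lx)\n", start, end);
	else
		printf("skip: range fell inside the reserved window\n");
}

int main(void)
{
	const struct range crash = { 0x2000000, 0x3000000 };	/* reserved */

	register_range(0x1000000, 0x2800000, &crash);	/* tail trimmed */
	register_range(0x2800000, 0x4000000, &crash);	/* head trimmed */
	register_range(0x2100000, 0x2900000, &crash);	/* fully inside */
	return 0;
}

The final start < end test is what turns a range lying fully inside the reserved window into a no-op instead of an inverted registration.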
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index c8df75901083..6dd476b652c6 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -51,12 +51,13 @@ extern void efi_memmap_init(unsigned long *, unsigned long *);
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
+extern int register_active_ranges(u64 start, u64 end, void *arg);
+
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
extern unsigned long vmalloc_end;
extern struct page *vmem_map;
extern int find_largest_hole (u64 start, u64 end, void *arg);
- extern int register_active_ranges (u64 start, u64 end, void *arg);
extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
extern int vmemmap_find_next_valid_pfn(int, int);
#else