path: root/include/asm-generic/memory_model.h
author     Andy Whitcroft <apw@shadowen.org>        2006-06-23 02:03:12 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-06-23 07:42:47 -0700
commit     67de648211fa041fe08a0c25241a4980bbb90698 (patch)
tree       94f737e0fed7c40dc59e94f05d6851e490082068 /include/asm-generic/memory_model.h
parent     [PATCH] wait_table and zonelist initializing for memory hotadd: update zonelists (diff)
[PATCH] squash duplicate page_to_pfn and pfn_to_page
We have architectures where the size of page_to_pfn and pfn_to_page is significant enough, relative to overall image size, that they wish to push them out of line. However, in the process we have grown a second copy of the implementation of each of these routines for each memory model. Share the implementation, exposing it either inline or out-of-line as required.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
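As an illustration (not part of this patch; the file placement and EXPORT_SYMBOL usage here are assumptions), an architecture that selects CONFIG_OUT_OF_LINE_PFN_TO_PAGE could then provide the real symbols as thin out-of-line wrappers around the shared helpers:

/*
 * Illustrative sketch only: out-of-line definitions built on the
 * shared __pfn_to_page()/__page_to_pfn() helpers from
 * <asm-generic/memory_model.h>.
 */
#include <linux/mm.h>
#include <linux/module.h>

#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
struct page *pfn_to_page(unsigned long pfn)
{
	return __pfn_to_page(pfn);
}
EXPORT_SYMBOL(pfn_to_page);

unsigned long page_to_pfn(struct page *page)
{
	return __page_to_pfn(page);
}
EXPORT_SYMBOL(page_to_pfn);
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */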
Diffstat (limited to 'include/asm-generic/memory_model.h')
-rw-r--r--  include/asm-generic/memory_model.h | 27 +++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 0cfb086dd373..8078cbd2c016 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -23,29 +23,23 @@
#endif /* CONFIG_DISCONTIGMEM */
-#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
-struct page;
-/* this is useful when inlined pfn_to_page is too big */
-extern struct page *pfn_to_page(unsigned long pfn);
-extern unsigned long page_to_pfn(struct page *page);
-#else
/*
* supports 3 memory models.
*/
#if defined(CONFIG_FLATMEM)
-#define pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
+#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
+#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
#elif defined(CONFIG_DISCONTIGMEM)
-#define pfn_to_page(pfn) \
+#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
unsigned long __nid = arch_pfn_to_nid(pfn); \
NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})
-#define page_to_pfn(pg) \
+#define __page_to_pfn(pg) \
({ struct page *__pg = (pg); \
struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
(unsigned long)(__pg - __pgdat->node_mem_map) + \
@@ -57,18 +51,27 @@ extern unsigned long page_to_pfn(struct page *page);
* Note: section's mem_map is encoded to reflect its start_pfn.
* section[i].section_mem_map == mem_map's address - start_pfn;
*/
-#define page_to_pfn(pg) \
+#define __page_to_pfn(pg) \
({ struct page *__pg = (pg); \
int __sec = page_to_section(__pg); \
__pg - __section_mem_map_addr(__nr_to_section(__sec)); \
})
-#define pfn_to_page(pfn) \
+#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
struct mem_section *__sec = __pfn_to_section(__pfn); \
__section_mem_map_addr(__sec) + __pfn; \
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+
+#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
+struct page;
+/* this is useful when inlined pfn_to_page is too big */
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *page);
+#else
+#define page_to_pfn __page_to_pfn
+#define pfn_to_page __pfn_to_page
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
#endif /* __ASSEMBLY__ */
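To make the SPARSEMEM arithmetic above concrete: each section stores its mem_map pointer pre-biased by the section's start_pfn, so both conversions reduce to a single add or subtract. A minimal sketch of that invariant, using the real helper names but written as plain functions for readability (illustrative only, not part of the patch):

/*
 * Sketch of the encoding noted above:
 *   section_mem_map == mem_map address - start_pfn
 *
 * Example: if a section covers pfns 0x10000..0x17fff and its mem_map
 * array lives at address M, the stored value is M - 0x10000, so
 * pfn_to_page(0x10003) == (M - 0x10000) + 0x10003 == M + 3, i.e. the
 * fourth struct page in that section's mem_map (pointer arithmetic is
 * in units of struct page).
 */
static struct page *sparse_pfn_to_page(unsigned long pfn)
{
	struct mem_section *sec = __pfn_to_section(pfn);

	/* adding the full pfn to the biased base lands on this pfn's page */
	return __section_mem_map_addr(sec) + pfn;
}

static unsigned long sparse_page_to_pfn(struct page *page)
{
	struct mem_section *sec = __nr_to_section(page_to_section(page));

	/* subtracting the biased base gives back the absolute pfn */
	return page - __section_mem_map_addr(sec);
}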