path: root/include/linux/mm_types.h
author	Heiko Carstens <heiko.carstens@de.ibm.com>	2006-09-27 01:50:01 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-27 08:26:12 -0700
commit	5b99cd0effaf846240a15441aec459a592577eaf (patch)
tree	355772422c716698762030e6261596c2ba484a37 /include/linux/mm_types.h
parent	[PATCH] vm: add per-zone writeout counter (diff)
download	linux-dev-5b99cd0effaf846240a15441aec459a592577eaf.tar.xz
	linux-dev-5b99cd0effaf846240a15441aec459a592577eaf.zip
[PATCH] own header file for struct page
This moves the definition of struct page from mm.h into its own header file, mm_types.h. It is a prerequisite to fixing SetPageUptodate, which is broken on s390:

#define SetPageUptodate(_page)
	do {
		struct page *__page = (_page);
		if (!test_and_set_bit(PG_uptodate, &__page->flags))
			page_test_and_clear_dirty(_page);
	} while (0)

_page gets used twice in this macro, which can cause subtle bugs. Using __page for the page_test_and_clear_dirty call doesn't work either, since it causes yet another problem with the page_test_and_clear_dirty macro.

In order to avoid all these problems caused by macros, it seems to be a good idea to get rid of them and convert them to static inline functions. Because of header file include order it's necessary to have a separate header file for the struct page definition.

Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
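For illustration only (not part of this patch): once struct page is available from its own header, the problematic macro can be converted into a static inline function so that the page argument is evaluated exactly once. A minimal sketch, assuming the same PG_uptodate flag and the s390 page_test_and_clear_dirty() helper shown in the macro above:

static inline void SetPageUptodate(struct page *page)
{
	/* the argument is evaluated once, so no double-expansion bugs */
	if (!test_and_set_bit(PG_uptodate, &page->flags))
		page_test_and_clear_dirty(page);
}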
Diffstat (limited to 'include/linux/mm_types.h')
-rw-r--r--	include/linux/mm_types.h	67
1 file changed, 67 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
new file mode 100644
index 000000000000..c3852fd4a1cc
--- /dev/null
+++ b/include/linux/mm_types.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_MM_TYPES_H
+#define _LINUX_MM_TYPES_H
+
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct address_space;
+
+/*
+ * Each physical page in the system has a struct page associated with
+ * it to keep track of whatever it is we are using the page for at the
+ * moment. Note that we have no way to track which tasks are using
+ * a page, though if it is a pagecache page, rmap structures can tell us
+ * who is mapping it.
+ */
+struct page {
+ unsigned long flags; /* Atomic flags, some possibly
+ * updated asynchronously */
+ atomic_t _count; /* Usage count, see below. */
+ atomic_t _mapcount; /* Count of ptes mapped in mms,
+ * to show when page is mapped
+ * & limit reverse map searches.
+ */
+ union {
+ struct {
+ unsigned long private; /* Mapping-private opaque data:
+ * usually used for buffer_heads
+ * if PagePrivate set; used for
+ * swp_entry_t if PageSwapCache;
+ * indicates order in the buddy
+ * system if PG_buddy is set.
+ */
+ struct address_space *mapping; /* If low bit clear, points to
+ * inode address_space, or NULL.
+ * If page mapped as anonymous
+ * memory, low bit is set, and
+ * it points to anon_vma object:
+ * see PAGE_MAPPING_ANON below.
+ */
+ };
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+ spinlock_t ptl;
+#endif
+ };
+ pgoff_t index; /* Our offset within mapping. */
+ struct list_head lru; /* Pageout list, eg. active_list
+ * protected by zone->lru_lock !
+ */
+ /*
+ * On machines where all RAM is mapped into kernel address space,
+ * we can simply calculate the virtual address. On machines with
+ * highmem some memory is mapped into kernel virtual memory
+ * dynamically, so we need a place to store that address.
+ * Note that this field could be 16 bits on x86 ... ;)
+ *
+ * Architectures with slow multiplication can define
+ * WANT_PAGE_VIRTUAL in asm/page.h
+ */
+#if defined(WANT_PAGE_VIRTUAL)
+ void *virtual; /* Kernel virtual address (NULL if
+ not kmapped, ie. highmem) */
+#endif /* WANT_PAGE_VIRTUAL */
+};
+
+#endif /* _LINUX_MM_TYPES_H */
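
For reference only (not part of this file): the comment on the 'mapping' member above describes the low-bit encoding that distinguishes anonymous pages from pagecache pages. A hedged sketch of how that bit is typically tested, assuming PAGE_MAPPING_ANON is defined as bit 0 (historically in include/linux/mm.h); page_mapping_of() is a simplified, hypothetical helper (the real page_mapping() also handles the swap cache):

#define PAGE_MAPPING_ANON	1

static inline int PageAnon(struct page *page)
{
	/* low bit set: 'mapping' points to an anon_vma, not an address_space */
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static inline struct address_space *page_mapping_of(struct page *page)
{
	/* anonymous pages have no inode address_space */
	if (PageAnon(page))
		return NULL;
	return page->mapping;
}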