author     Lee Schermerhorn <Lee.Schermerhorn@hp.com>        2008-10-18 20:26:43 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-10-20 08:50:26 -0700
commit     89e004ea55abe201b29e2d6e35124101f1288ef7 (patch)
tree       272a8f453106fd33d66fd7153f44696648dbe8b6 /include/linux
parent     Ramfs and Ram Disk pages are unevictable (diff)
SHM_LOCKED pages are unevictable
Shmem segments locked into memory via shmctl(SHM_LOCKED) should not be kept on the normal LRU, since scanning them is a waste of time and might throw off kswapd's balancing algorithms. Place them on the unevictable LRU list instead.

Use the AS_UNEVICTABLE flag to mark the address_space of SHM_LOCKed shared memory regions as unevictable. Then these pages will be culled off the normal LRU lists during vmscan.

Add a new wrapper function to clear the mapping's unevictable state when/if a shared memory segment is unlocked.

Add 'scan_mapping_unevictable_pages()' to mm/vmscan.c to scan all pages in the shmem segment's mapping [struct address_space] for evictability now that they're no longer locked. Pages found to be evictable are moved to the appropriate zone lru list.

Changes depend on [CONFIG_]UNEVICTABLE_LRU.

[kosaki.motohiro@jp.fujitsu.com: revert shm change]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Kosaki Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
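As context for the helpers added below, a minimal, illustration-only sketch of how a SHM_LOCK/SHM_UNLOCK path could drive them. It is not part of this patch (the shm caller change was reverted, per the note above), and the function names shmem_lock_mapping() and shmem_unlock_mapping() are hypothetical:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Hypothetical callers, for illustration only; not part of this patch. */
static void shmem_lock_mapping(struct address_space *mapping)
{
	/* SHM_LOCK: mark the mapping so vmscan culls its pages onto
	 * the unevictable LRU instead of rescanning them. */
	mapping_set_unevictable(mapping);
}

static void shmem_unlock_mapping(struct address_space *mapping)
{
	/* SHM_UNLOCK: allow eviction again ... */
	mapping_clear_unevictable(mapping);
	/* ... then re-check pages already parked on the unevictable
	 * list and move the evictable ones back to a zone LRU list. */
	scan_mapping_unevictable_pages(mapping);
}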
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/mm.h        4
-rw-r--r--  include/linux/pagemap.h  12
-rw-r--r--  include/linux/swap.h      4
3 files changed, 15 insertions, 5 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c61ba10768ea..40236290e2ae 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -700,10 +700,10 @@ static inline int page_mapped(struct page *page)
extern void show_free_areas(void);
#ifdef CONFIG_SHMEM
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
static inline int shmem_lock(struct file *file, int lock,
- struct user_struct *user)
+ struct user_struct *user)
{
return 0;
}
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 09164d2c5c27..4b6c4d8d26b8 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -40,14 +40,20 @@ static inline void mapping_set_unevictable(struct address_space *mapping)
set_bit(AS_UNEVICTABLE, &mapping->flags);
}
+static inline void mapping_clear_unevictable(struct address_space *mapping)
+{
+ clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
static inline int mapping_unevictable(struct address_space *mapping)
{
- if (mapping && (mapping->flags & AS_UNEVICTABLE))
- return 1;
- return 0;
+ if (likely(mapping))
+ return test_bit(AS_UNEVICTABLE, &mapping->flags);
+ return !!mapping;
}
#else
static inline void mapping_set_unevictable(struct address_space *mapping) { }
+static inline void mapping_clear_unevictable(struct address_space *mapping) { }
static inline int mapping_unevictable(struct address_space *mapping)
{
return 0;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a2113044d20a..7edb4cbc29f9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -232,12 +232,16 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
#ifdef CONFIG_UNEVICTABLE_LRU
extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+extern void scan_mapping_unevictable_pages(struct address_space *);
#else
static inline int page_evictable(struct page *page,
struct vm_area_struct *vma)
{
return 1;
}
+static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+}
#endif
extern int kswapd_run(int nid);