author    KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2009-06-16 15:32:54 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 19:47:42 -0700
commit    c9e444103b5e7a5a3519f9913f59767f92e33baf (patch)
tree      6b14020dc9271b5ef22ba34daf5494ef6572abb9 /mm/swapfile.c
parent    mm: modify swap_map and add SWAP_HAS_CACHE flag (diff)
mm: reuse unused swap entry if necessary
Presently we can tell that a swap entry is used only as swap cache by looking at swap_map, without looking up the swap cache itself. This gives us a chance to reuse swap-cache-only swap entries in get_swap_pages(). This patch tries to free swap-cache-only swap entries when free swap space is running low.

Note: this path is hit when the swap_cluster code cannot find a free cluster, so vm_swap_full() is not the only condition under which the kernel reclaims unused swap.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Balbir Singh <balbir@in.ibm.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Tested-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
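For context, the parent commit ("mm: modify swap_map and add SWAP_HAS_CACHE flag") packs a reference count and a has-cache bit into each swap_map slot. The stand-alone sketch below illustrates that idea; it is not the kernel source, and the exact SWAP_HAS_CACHE value and the omitted overflow handling are assumptions. It shows why comparing a slot against SWAP_HAS_CACHE alone identifies an entry whose only remaining user is the swap cache:

#include <stdbool.h>
#include <stdio.h>

#define SWAP_HAS_CACHE	0x8000	/* assumed flag value, high bit of the slot */

/* pack a map count and a has-cache bit into one swap_map slot
 * (overflow handling omitted for brevity) */
static unsigned short encode_swapmap(int count, bool has_cache)
{
	unsigned short ret = count;

	if (has_cache)
		ret |= SWAP_HAS_CACHE;
	return ret;
}

int main(void)
{
	unsigned short slot = encode_swapmap(0, true);

	/* count == 0 with the cache bit set: the slot is cache-only,
	 * exactly the condition the patch below tests for */
	printf("cache-only: %s\n", slot == SWAP_HAS_CACHE ? "yes" : "no");
	return 0;
}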
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0d7296971ad9..28faa01cf578 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -79,6 +79,32 @@ static inline unsigned short encode_swapmap(int count, bool has_cache)
 	return ret;
 }
+/* returns 1 if the swap entry has been freed */
+static int
+__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
+{
+	int type = si - swap_info;
+	swp_entry_t entry = swp_entry(type, offset);
+	struct page *page;
+	int ret = 0;
+
+	page = find_get_page(&swapper_space, entry.val);
+	if (!page)
+		return 0;
+	/*
+	 * This function is called from scan_swap_map(), which is reached
+	 * from vmscan.c while reclaiming pages, so a page lock may already
+	 * be held when we get here. We have to use trylock to avoid
+	 * deadlock; this is a special case, and normal callers should use
+	 * try_to_free_swap() with an explicit lock_page() instead.
+	 */
+	if (trylock_page(page)) {
+		ret = try_to_free_swap(page);
+		unlock_page(page);
+	}
+	page_cache_release(page);
+	return ret;
+}
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
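A note on the trylock in __try_to_reclaim_swap() above: a caller may already hold a page lock further up the reclaim call chain, so blocking in lock_page() here could deadlock. The same back-off discipline can be shown with a user-space analogue using POSIX mutexes; this is a minimal sketch with invented names, not kernel code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;

/* Attempt optional cleanup; if the lock is already held somewhere in
 * our own call chain, back off instead of blocking, mirroring the
 * trylock_page()/try_to_free_swap() pairing above. */
static int try_cleanup(void)
{
	int done = 0;

	if (pthread_mutex_trylock(&object_lock) == 0) {
		/* ... the actual cleanup would happen here ... */
		done = 1;
		pthread_mutex_unlock(&object_lock);
	}
	return done;	/* 0 means "busy, skip for now", not failure */
}

int main(void)
{
	printf("cleanup %s\n", try_cleanup() ? "ran" : "skipped");
	return 0;
}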
@@ -301,6 +327,19 @@ checks:
 		goto no_page;
 	if (offset > si->highest_bit)
 		scan_base = offset = si->lowest_bit;
+
+	/* reuse the swap entry if the slot is cache-only and not busy */
+	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+		int swap_was_freed;
+		spin_unlock(&swap_lock);
+		swap_was_freed = __try_to_reclaim_swap(si, offset);
+		spin_lock(&swap_lock);
+		/* the entry was freed, so try to allocate this slot again */
+		if (swap_was_freed)
+			goto checks;
+		goto scan; /* check the next slot */
+	}
+
 	if (si->swap_map[offset])
 		goto scan;
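Note the lock choreography in the hunk above: swap_lock is dropped around the call to __try_to_reclaim_swap(), since the reclaim path can end up taking swap_lock itself when it frees the entry, and after the lock is retaken the state is re-validated by jumping back to "checks:". The pattern of dropping a lock for the slow work, retaking it, and then re-checking generalizes; below is a toy user-space model with hypothetical names, a mutex standing in for the spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int slot_free;	/* toy stand-in for the swap_map slot state */

/* Stand-in for __try_to_reclaim_swap(): like the real reclaim path,
 * it must take state_lock itself, so the caller cannot hold it. */
static bool slow_reclaim(void)
{
	pthread_mutex_lock(&state_lock);
	slot_free = 1;
	pthread_mutex_unlock(&state_lock);
	return true;
}

static bool claim_slot(void)
{
	bool freed;

	pthread_mutex_lock(&state_lock);
retry:
	if (slot_free) {			/* validated under the lock */
		slot_free = 0;			/* claim it */
		pthread_mutex_unlock(&state_lock);
		return true;
	}
	pthread_mutex_unlock(&state_lock);	/* drop before the slow call */
	freed = slow_reclaim();
	pthread_mutex_lock(&state_lock);
	if (freed)
		goto retry;			/* re-validate after retaking */
	pthread_mutex_unlock(&state_lock);
	return false;
}

int main(void)
{
	printf("claimed: %s\n", claim_slot() ? "yes" : "no");
	return 0;
}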
@@ -382,6 +421,10 @@ scan:
 			spin_lock(&swap_lock);
 			goto checks;
 		}
+		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+			spin_lock(&swap_lock);
+			goto checks;
+		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
@@ -393,6 +436,10 @@ scan:
 			spin_lock(&swap_lock);
 			goto checks;
 		}
+		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+			spin_lock(&swap_lock);
+			goto checks;
+		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
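The last two hunks patch the unlocked forward and backward scan loops in the same way: previously only a completely free slot (!si->swap_map[offset]) stopped the scan, and now a cache-only slot does too, retaking swap_lock and jumping back to "checks:" where the reclaim shown earlier can run. Below is a compact, self-contained model of one scan direction; kernel facilities are replaced by stubs, while LATENCY_LIMIT and latency_ration mirror the names in swapfile.c:

#include <stdbool.h>
#include <stdio.h>

#define SWAP_HAS_CACHE	0x8000
#define LATENCY_LIMIT	256

struct toy_swap_info {
	unsigned short *swap_map;
	unsigned long highest_bit;
};

/* stubs for vm_swap_full() and cond_resched(), illustration only */
static bool toy_vm_swap_full(void) { return true; }
static void toy_cond_resched(void) { }

/* Scan forward without the lock; return the first candidate slot, or
 * 0 if none. The caller must retake swap_lock and re-validate, which
 * is what the "goto checks" in the patch does. */
static unsigned long scan_forward(struct toy_swap_info *si,
				  unsigned long offset)
{
	int latency_ration = LATENCY_LIMIT;

	while (++offset <= si->highest_bit) {
		if (!si->swap_map[offset])
			return offset;	/* free slot */
		if (toy_vm_swap_full() &&
		    si->swap_map[offset] == SWAP_HAS_CACHE)
			return offset;	/* cache-only: reclaim may free it */
		if (--latency_ration < 0) {
			toy_cond_resched();	/* yield between batches */
			latency_ration = LATENCY_LIMIT;
		}
	}
	return 0;
}

int main(void)
{
	unsigned short map[8] = { 1, 1, SWAP_HAS_CACHE, 1, 0, 1, 1, 1 };
	struct toy_swap_info si = { .swap_map = map, .highest_bit = 7 };

	printf("candidate at offset %lu\n", scan_forward(&si, 0));
	return 0;
}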