 mm/swapfile.c | 174 ++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 125 insertions(+), 49 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e97a0e5aea91..c72c648f750c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,9 +47,11 @@ static sector_t map_swap_entry(swp_entry_t, struct block_device**);
DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
-long nr_swap_pages;
+atomic_long_t nr_swap_pages;
+/* protected by swap_lock; reading it in vm_swap_full() doesn't need the lock */
long total_swap_pages;
static int least_priority;
+static atomic_t highest_priority_index = ATOMIC_INIT(-1);
static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
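
Everything below builds on the two declarations just added: nr_swap_pages becomes an atomic so hot paths can adjust it without swap_lock, and highest_priority_index carries a hint from the free path to get_swap_page(). A minimal standalone sketch of the counter pattern as the later hunks use it (reserve_swap_page() is a hypothetical wrapper, not part of the patch):

    #include <linux/atomic.h>
    #include <linux/types.h>

    static atomic_long_t nr_swap_pages = ATOMIC_LONG_INIT(0);

    /*
     * Optimistic check, then reserve, as get_swap_page() below does. The
     * counter may dip below zero transiently when readers race; callers
     * treat anything <= 0 as "no swap" and re-increment on failure.
     */
    static bool reserve_swap_page(void)
    {
            if (atomic_long_read(&nr_swap_pages) <= 0)
                    return false;
            atomic_long_dec(&nr_swap_pages);
            return true;
    }
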
@@ -79,7 +81,7 @@ __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
struct page *page;
int ret = 0;
- page = find_get_page(&swapper_space, entry.val);
+ page = find_get_page(swap_address_space(entry), entry.val);
if (!page)
return 0;
/*
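
find_get_page() now consults a per-swap-type address space instead of the single global swapper_space. swap_address_space() is introduced by a companion patch in this series; its assumed shape, sketched from context (not part of this diff):

    /* include/linux/swap.h (companion patch, sketched) */
    extern struct address_space swapper_spaces[];
    #define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
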
@@ -223,7 +225,7 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
si->lowest_alloc = si->max;
si->highest_alloc = 0;
}
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
/*
* If seek is expensive, start searching for new cluster from
@@ -242,7 +244,7 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
if (si->swap_map[offset])
last_in_cluster = offset + SWAPFILE_CLUSTER;
else if (offset == last_in_cluster) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
@@ -263,7 +265,7 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
if (si->swap_map[offset])
last_in_cluster = offset + SWAPFILE_CLUSTER;
else if (offset == last_in_cluster) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
@@ -277,7 +279,7 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
}
offset = scan_base;
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
si->cluster_nr = SWAPFILE_CLUSTER - 1;
si->lowest_alloc = 0;
}
@@ -293,9 +295,9 @@ checks:
/* reuse swap entry of cache-only swap if not busy. */
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
/* entry was freed successfully, try to use this again */
if (swap_was_freed)
goto checks;
@@ -335,13 +337,13 @@ checks:
si->lowest_alloc <= last_in_cluster)
last_in_cluster = si->lowest_alloc - 1;
si->flags |= SWP_DISCARDING;
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
if (offset < last_in_cluster)
discard_swap_cluster(si, offset,
last_in_cluster - offset + 1);
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
si->lowest_alloc = 0;
si->flags &= ~SWP_DISCARDING;
@@ -355,10 +357,10 @@ checks:
* could defer that delay until swap_writepage,
* but it's easier to keep this self-contained.
*/
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
wait_for_discard, TASK_UNINTERRUPTIBLE);
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
} else {
/*
* Note pages allocated by racing tasks while
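
Both discard branches keep one rule under the new lock: never block while holding si->lock. Condensed from the two hunks above, with nr standing in for the cluster-length expression:

    /* discarder: publish the state, drop the lock for the slow work */
    si->flags |= SWP_DISCARDING;
    spin_unlock(&si->lock);
    discard_swap_cluster(si, offset, nr);
    spin_lock(&si->lock);
    si->flags &= ~SWP_DISCARDING;

    /* waiter: sleep outside the lock, revalidate after waking */
    spin_unlock(&si->lock);
    wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
                wait_for_discard, TASK_UNINTERRUPTIBLE);
    spin_lock(&si->lock);
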
@@ -374,14 +376,14 @@ checks:
return offset;
scan:
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
while (++offset <= si->highest_bit) {
if (!si->swap_map[offset]) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
goto checks;
}
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
goto checks;
}
if (unlikely(--latency_ration < 0)) {
@@ -392,11 +394,11 @@ scan:
offset = si->lowest_bit;
while (++offset < scan_base) {
if (!si->swap_map[offset]) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
goto checks;
}
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
goto checks;
}
if (unlikely(--latency_ration < 0)) {
@@ -404,7 +406,7 @@ scan:
latency_ration = LATENCY_LIMIT;
}
}
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
no_page:
si->flags -= SWP_SCANNING;
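
With the substitutions above complete, scan_swap_map() has a simple external contract: it is entered and left with si->lock held, and may drop and retake that lock internally (reclaim, discard, and the long unlocked scans). An illustrative caller, matching the get_swap_page() hunk further down:

    spin_lock(&si->lock);
    offset = scan_swap_map(si, SWAP_HAS_CACHE);
    spin_unlock(&si->lock);
    if (offset)
            return swp_entry(type, offset);
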
@@ -417,13 +419,34 @@ swp_entry_t get_swap_page(void)
pgoff_t offset;
int type, next;
int wrapped = 0;
+ int hp_index;
spin_lock(&swap_lock);
- if (nr_swap_pages <= 0)
+ if (atomic_long_read(&nr_swap_pages) <= 0)
goto noswap;
- nr_swap_pages--;
+ atomic_long_dec(&nr_swap_pages);
for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
+ hp_index = atomic_xchg(&highest_priority_index, -1);
+ /*
+ * highest_priority_index records the current highest priority swap
+ * type which just freed swap entries. If its priority is higher than
+ * that of swap_list.next's swap type, we use it. It isn't protected
+ * by swap_lock, so it can hold a stale value if the corresponding
+ * swap type has been swapped off; we double check the flags here.
+ * It's even possible that the swap type was swapped off and on again
+ * with a changed priority. In that rare case a low priority swap
+ * type might be used, but the high priority one will eventually be
+ * used after several rounds of swapping.
+ */
+ if (hp_index != -1 && hp_index != type &&
+ swap_info[type]->prio < swap_info[hp_index]->prio &&
+ (swap_info[hp_index]->flags & SWP_WRITEOK)) {
+ type = hp_index;
+ swap_list.next = type;
+ }
+
si = swap_info[type];
next = si->next;
if (next < 0 ||
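
The atomic_xchg() earlier in this hunk consumes the hint exactly once, since the reader clears it to -1 in the same atomic step; a stale index therefore costs at most one redirected pass. The consumption logic, reduced:

    hp_index = atomic_xchg(&highest_priority_index, -1); /* read and clear */
    if (hp_index != -1 && hp_index != type &&
        swap_info[type]->prio < swap_info[hp_index]->prio &&
        (swap_info[hp_index]->flags & SWP_WRITEOK)) {   /* may be stale: recheck */
            type = hp_index;
            swap_list.next = type;
    }
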
@@ -432,22 +455,29 @@ swp_entry_t get_swap_page(void)
wrapped++;
}
- if (!si->highest_bit)
+ spin_lock(&si->lock);
+ if (!si->highest_bit) {
+ spin_unlock(&si->lock);
continue;
- if (!(si->flags & SWP_WRITEOK))
+ }
+ if (!(si->flags & SWP_WRITEOK)) {
+ spin_unlock(&si->lock);
continue;
+ }
swap_list.next = next;
+
+ spin_unlock(&swap_lock);
/* This is called for allocating swap entry for cache */
offset = scan_swap_map(si, SWAP_HAS_CACHE);
- if (offset) {
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
+ if (offset)
return swp_entry(type, offset);
- }
+ spin_lock(&swap_lock);
next = swap_list.next;
}
- nr_swap_pages++;
+ atomic_long_inc(&nr_swap_pages);
noswap:
spin_unlock(&swap_lock);
return (swp_entry_t) {0};
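
Taken together with the previous hunk, this fixes the lock ordering for the whole file: swap_lock (global, guarding the priority list) nests outside si->lock (per device), and the global lock is released as soon as a device is pinned. In outline:

    spin_lock(&swap_lock);          /* choose a device from swap_list */
    spin_lock(&si->lock);           /* pin the chosen device */
    spin_unlock(&swap_lock);        /* the scan needs only the device lock */
    offset = scan_swap_map(si, SWAP_HAS_CACHE);
    spin_unlock(&si->lock);
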
@@ -459,19 +489,19 @@ swp_entry_t get_swap_page_of_type(int type)
struct swap_info_struct *si;
pgoff_t offset;
- spin_lock(&swap_lock);
si = swap_info[type];
+ spin_lock(&si->lock);
if (si && (si->flags & SWP_WRITEOK)) {
- nr_swap_pages--;
+ atomic_long_dec(&nr_swap_pages);
/* This is called for allocating swap entry, not cache */
offset = scan_swap_map(si, 1);
if (offset) {
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
return swp_entry(type, offset);
}
- nr_swap_pages++;
+ atomic_long_inc(&nr_swap_pages);
}
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
return (swp_entry_t) {0};
}
@@ -493,7 +523,7 @@ static struct swap_info_struct *swap_info_get(swp_entry_t entry)
goto bad_offset;
if (!p->swap_map[offset])
goto bad_free;
- spin_lock(&swap_lock);
+ spin_lock(&p->lock);
return p;
bad_free:
@@ -511,6 +541,27 @@ out:
return NULL;
}
+/*
+ * This swap type just freed a swap entry. Check whether it is now the
+ * highest priority swap type that recently freed an entry;
+ * get_swap_page() uses highest_priority_index to pick the highest
+ * priority swap type. The swap_info_struct.lock can't protect us when
+ * multiple swap types are active, so we use atomic_cmpxchg.
+ */
+static void set_highest_priority_index(int type)
+{
+ int old_hp_index, new_hp_index;
+
+ do {
+ old_hp_index = atomic_read(&highest_priority_index);
+ if (old_hp_index != -1 &&
+ swap_info[old_hp_index]->prio >= swap_info[type]->prio)
+ break;
+ new_hp_index = type;
+ } while (atomic_cmpxchg(&highest_priority_index,
+ old_hp_index, new_hp_index) != old_hp_index);
+}
+
static unsigned char swap_entry_free(struct swap_info_struct *p,
swp_entry_t entry, unsigned char usage)
{
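
set_highest_priority_index() above is a classic compare-and-swap publish loop: read, decide, attempt, and retry only if another CPU raced in between. A standalone model of the same loop (prios[] is a hypothetical stand-in for swap_info[i]->prio):

    static atomic_t highest_priority_index = ATOMIC_INIT(-1);
    static int prios[MAX_SWAPFILES];        /* stand-in for swap_info[i]->prio */

    static void publish_if_higher(int type)
    {
            int old;

            do {
                    old = atomic_read(&highest_priority_index);
                    /* keep the current hint if it already has >= priority */
                    if (old != -1 && prios[old] >= prios[type])
                            break;
            } while (atomic_cmpxchg(&highest_priority_index, old, type) != old);
    }
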
@@ -553,10 +604,8 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
- if (swap_list.next >= 0 &&
- p->prio > swap_info[swap_list.next]->prio)
- swap_list.next = p->type;
- nr_swap_pages++;
+ set_highest_priority_index(p->type);
+ atomic_long_inc(&nr_swap_pages);
p->inuse_pages--;
frontswap_invalidate_page(p->type, offset);
if (p->flags & SWP_BLKDEV) {
@@ -581,7 +630,7 @@ void swap_free(swp_entry_t entry)
p = swap_info_get(entry);
if (p) {
swap_entry_free(p, entry, 1);
- spin_unlock(&swap_lock);
+ spin_unlock(&p->lock);
}
}
@@ -598,7 +647,7 @@ void swapcache_free(swp_entry_t entry, struct page *page)
count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
if (page)
mem_cgroup_uncharge_swapcache(page, entry, count != 0);
- spin_unlock(&swap_lock);
+ spin_unlock(&p->lock);
}
}
@@ -617,7 +666,7 @@ int page_swapcount(struct page *page)
p = swap_info_get(entry);
if (p) {
count = swap_count(p->swap_map[swp_offset(entry)]);
- spin_unlock(&swap_lock);
+ spin_unlock(&p->lock);
}
return count;
}
@@ -699,13 +748,14 @@ int free_swap_and_cache(swp_entry_t entry)
p = swap_info_get(entry);
if (p) {
if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
- page = find_get_page(&swapper_space, entry.val);
+ page = find_get_page(swap_address_space(entry),
+ entry.val);
if (page && !trylock_page(page)) {
page_cache_release(page);
page = NULL;
}
}
- spin_unlock(&swap_lock);
+ spin_unlock(&p->lock);
}
if (page) {
/*
@@ -803,11 +853,13 @@ unsigned int count_swap_pages(int type, int free)
if ((unsigned int)type < nr_swapfiles) {
struct swap_info_struct *sis = swap_info[type];
+ spin_lock(&sis->lock);
if (sis->flags & SWP_WRITEOK) {
n = sis->pages;
if (free)
n -= sis->inuse_pages;
}
+ spin_unlock(&sis->lock);
}
spin_unlock(&swap_lock);
return n;
@@ -822,11 +874,17 @@ unsigned int count_swap_pages(int type, int free)
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct page *page)
{
+ struct page *swapcache;
struct mem_cgroup *memcg;
spinlock_t *ptl;
pte_t *pte;
int ret = 1;
+ swapcache = page;
+ page = ksm_might_need_to_copy(page, vma, addr);
+ if (unlikely(!page))
+ return -ENOMEM;
+
if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
GFP_KERNEL, &memcg)) {
ret = -ENOMEM;
@@ -845,7 +903,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
get_page(page);
set_pte_at(vma->vm_mm, addr, pte,
pte_mkold(mk_pte(page, vma->vm_page_prot)));
- page_add_anon_rmap(page, vma, addr);
+ if (page == swapcache)
+ page_add_anon_rmap(page, vma, addr);
+ else /* ksm created a completely new copy */
+ page_add_new_anon_rmap(page, vma, addr);
mem_cgroup_commit_charge_swapin(page, memcg);
swap_free(entry);
/*
@@ -856,6 +917,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
out:
pte_unmap_unlock(pte, ptl);
out_nolock:
+ if (page != swapcache) {
+ unlock_page(page);
+ put_page(page);
+ }
return ret;
}
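
The swapcache/page split above is the usual calling pattern for ksm_might_need_to_copy(): it may return the original page, a freshly allocated private copy, or NULL on allocation failure, and a copy is the caller's to unlock and release. The three hunks, reduced to one skeleton (error paths elided):

    swapcache = page;
    page = ksm_might_need_to_copy(page, vma, addr);
    if (unlikely(!page))
            return -ENOMEM;                 /* could not copy the KSM page */
    /* ... charge memcg, install the pte ... */
    if (page == swapcache)
            page_add_anon_rmap(page, vma, addr);     /* mapped the cache page */
    else
            page_add_new_anon_rmap(page, vma, addr); /* mapped the new copy */
    /* ... */
    if (page != swapcache) {
            unlock_page(page);              /* the copy is ours to drop */
            put_page(page);
    }
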
@@ -1456,7 +1521,7 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
p->swap_map = swap_map;
frontswap_map_set(p, frontswap_map);
p->flags |= SWP_WRITEOK;
- nr_swap_pages += p->pages;
+ atomic_long_add(p->pages, &nr_swap_pages);
total_swap_pages += p->pages;
/* insert swap space into swap_list: */
@@ -1478,15 +1543,19 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
unsigned long *frontswap_map)
{
spin_lock(&swap_lock);
+ spin_lock(&p->lock);
_enable_swap_info(p, prio, swap_map, frontswap_map);
frontswap_init(p->type);
+ spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
static void reinsert_swap_info(struct swap_info_struct *p)
{
spin_lock(&swap_lock);
+ spin_lock(&p->lock);
_enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
+ spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
@@ -1546,14 +1615,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
/* just pick something that's safe... */
swap_list.next = swap_list.head;
}
+ spin_lock(&p->lock);
if (p->prio < 0) {
for (i = p->next; i >= 0; i = swap_info[i]->next)
swap_info[i]->prio = p->prio--;
least_priority++;
}
- nr_swap_pages -= p->pages;
+ atomic_long_sub(p->pages, &nr_swap_pages);
total_swap_pages -= p->pages;
p->flags &= ~SWP_WRITEOK;
+ spin_unlock(&p->lock);
spin_unlock(&swap_lock);
set_current_oom_origin();
@@ -1572,14 +1643,17 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
+ spin_lock(&p->lock);
drain_mmlist();
/* wait for anyone still in scan_swap_map */
p->highest_bit = 0; /* cuts scans short */
while (p->flags >= SWP_SCANNING) {
+ spin_unlock(&p->lock);
spin_unlock(&swap_lock);
schedule_timeout_uninterruptible(1);
spin_lock(&swap_lock);
+ spin_lock(&p->lock);
}
swap_file = p->swap_file;
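
Waiting out in-flight scanners now means dropping both locks in reverse acquisition order, sleeping, and retaking them in order; SWP_SCANNING is held in p->flags by scan_swap_map() while it runs. The loop, isolated:

    while (p->flags >= SWP_SCANNING) {      /* a scanner is still inside */
            spin_unlock(&p->lock);
            spin_unlock(&swap_lock);
            schedule_timeout_uninterruptible(1);
            spin_lock(&swap_lock);          /* reacquire in lock order */
            spin_lock(&p->lock);
    }
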
@@ -1589,6 +1663,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
p->swap_map = NULL;
p->flags = 0;
frontswap_invalidate_area(type);
+ spin_unlock(&p->lock);
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
vfree(swap_map);
@@ -1794,6 +1869,7 @@ static struct swap_info_struct *alloc_swap_info(void)
p->flags = SWP_USED;
p->next = -1;
spin_unlock(&swap_lock);
+ spin_lock_init(&p->lock);
return p;
}
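
The spin_lock_init() added here implies the struct side of the series: a per-device spinlock in swap_info_struct (include/linux/swap.h, not shown in this diff). Its assumed shape, abridged:

    struct swap_info_struct {
            unsigned long   flags;          /* SWP_USED etc: see above */
            signed short    prio;           /* swap priority of this type */
            unsigned char   *swap_map;      /* vmalloc'ed array of usage counts */
            unsigned int    lowest_bit;     /* index of first free in swap_map */
            unsigned int    highest_bit;    /* index of last free in swap_map */
            spinlock_t      lock;           /* protects the scan-related fields */
    };
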
@@ -2116,7 +2192,7 @@ void si_swapinfo(struct sysinfo *val)
if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
nr_to_be_unused += si->inuse_pages;
}
- val->freeswap = nr_swap_pages + nr_to_be_unused;
+ val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
spin_unlock(&swap_lock);
}
@@ -2149,7 +2225,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
p = swap_info[type];
offset = swp_offset(entry);
- spin_lock(&swap_lock);
+ spin_lock(&p->lock);
if (unlikely(offset >= p->max))
goto unlock_out;
@@ -2184,7 +2260,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
p->swap_map[offset] = count | has_cache;
unlock_out:
- spin_unlock(&swap_lock);
+ spin_unlock(&p->lock);
out:
return err;
@@ -2309,7 +2385,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
}
if (!page) {
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
return -ENOMEM;
}
@@ -2357,7 +2433,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
list_add_tail(&page->lru, &head->lru);
page = NULL; /* now it's attached, don't free it */
out:
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
outer:
if (page)
__free_page(page);