path: root/mm/swap_state.c
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--  mm/swap_state.c  477
1 file changed, 272 insertions(+), 205 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 8e7ce9a9bc5e..438d0676c5be 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -21,8 +21,9 @@
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
-
-#include <asm/pgtable.h>
+#include <linux/shmem_fs.h>
+#include "internal.h"
+#include "swap.h"
/*
* swapper_space is a fiction, retained to simplify the path through
@@ -30,9 +31,9 @@
*/
static const struct address_space_operations swap_aops = {
.writepage = swap_writepage,
- .set_page_dirty = swap_set_page_dirty,
+ .dirty_folio = noop_dirty_folio,
#ifdef CONFIG_MIGRATION
- .migratepage = migrate_page,
+ .migrate_folio = migrate_folio,
#endif
};
@@ -58,72 +59,47 @@ static bool enable_vma_readahead __read_mostly = true;
#define GET_SWAP_RA_VAL(vma) \
(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
-#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
-#define ADD_CACHE_INFO(x, nr) do { swap_cache_info.x += (nr); } while (0)
-
-static struct {
- unsigned long add_total;
- unsigned long del_total;
- unsigned long find_success;
- unsigned long find_total;
-} swap_cache_info;
-
-unsigned long total_swapcache_pages(void)
-{
- unsigned int i, j, nr;
- unsigned long ret = 0;
- struct address_space *spaces;
- struct swap_info_struct *si;
-
- for (i = 0; i < MAX_SWAPFILES; i++) {
- swp_entry_t entry = swp_entry(i, 1);
-
- /* Avoid get_swap_device() to warn for bad swap entry */
- if (!swp_swap_info(entry))
- continue;
- /* Prevent swapoff to free swapper_spaces */
- si = get_swap_device(entry);
- if (!si)
- continue;
- nr = nr_swapper_spaces[i];
- spaces = swapper_spaces[i];
- for (j = 0; j < nr; j++)
- ret += spaces[j].nrpages;
- put_swap_device(si);
- }
- return ret;
-}
-
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
void show_swap_cache_info(void)
{
printk("%lu pages in swap cache\n", total_swapcache_pages());
- printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
- swap_cache_info.add_total, swap_cache_info.del_total,
- swap_cache_info.find_success, swap_cache_info.find_total);
printk("Free swap = %ldkB\n",
get_nr_swap_pages() << (PAGE_SHIFT - 10));
printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
+void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+ struct address_space *address_space = swap_address_space(entry);
+ pgoff_t idx = swp_offset(entry);
+ struct page *page;
+
+ page = xa_load(&address_space->i_pages, idx);
+ if (xa_is_value(page))
+ return page;
+ return NULL;
+}
+
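get_shadow_from_swap_cache() hands back the workingset shadow left in the XArray when the folio was reclaimed. A minimal consumption sketch for the swapin side, mirroring the __read_swap_cache_async() hunk further down (the wiring here is illustrative, not part of the patch):

	void *shadow = get_shadow_from_swap_cache(entry);

	if (shadow)
		/* the shadow encodes eviction state for refault distance */
		workingset_refault(folio, shadow);
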
/*
- * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * add_to_swap_cache resembles filemap_add_folio on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
+int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
+ gfp_t gfp, void **shadowp)
{
struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swp_offset(entry);
- XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
- unsigned long i, nr = compound_nr(page);
+ XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
+ unsigned long i, nr = folio_nr_pages(folio);
+ void *old;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(PageSwapCache(page), page);
- VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
- page_ref_add(page, nr);
- SetPageSwapCache(page);
+ folio_ref_add(folio, nr);
+ folio_set_swapcache(folio);
do {
xas_lock_irq(&xas);
@@ -131,14 +107,19 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
if (xas_error(&xas))
goto unlock;
for (i = 0; i < nr; i++) {
- VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
- set_page_private(page + i, entry.val + i);
- xas_store(&xas, page);
+ VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
+ old = xas_load(&xas);
+ if (xa_is_value(old)) {
+ if (shadowp)
+ *shadowp = old;
+ }
+ set_page_private(folio_page(folio, i), entry.val + i);
+ xas_store(&xas, folio);
xas_next(&xas);
}
address_space->nrpages += nr;
- __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
- ADD_CACHE_INFO(add_total, nr);
+ __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+ __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
xas_unlock_irq(&xas);
} while (xas_nomem(&xas, gfp));
@@ -146,56 +127,61 @@ unlock:
if (!xas_error(&xas))
return 0;
- ClearPageSwapCache(page);
- page_ref_sub(page, nr);
+ folio_clear_swapcache(folio);
+ folio_ref_sub(folio, nr);
return xas_error(&xas);
}
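The lock/store/unlock loop above is the stock XArray insertion idiom. A minimal generic form, assuming mapping, index, item and gfp are supplied by the caller:

	XA_STATE(xas, &mapping->i_pages, index);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, item);
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));	/* true if a node was allocated; retry */

	return xas_error(&xas);
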
/*
- * This must be called only on pages that have
+ * This must be called only on folios that have
* been verified to be in the swap cache.
*/
-void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
+void __delete_from_swap_cache(struct folio *folio,
+ swp_entry_t entry, void *shadow)
{
struct address_space *address_space = swap_address_space(entry);
- int i, nr = hpage_nr_pages(page);
+ int i;
+ long nr = folio_nr_pages(folio);
pgoff_t idx = swp_offset(entry);
XA_STATE(xas, &address_space->i_pages, idx);
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(!PageSwapCache(page), page);
- VM_BUG_ON_PAGE(PageWriteback(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
+ VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
for (i = 0; i < nr; i++) {
- void *entry = xas_store(&xas, NULL);
- VM_BUG_ON_PAGE(entry != page, entry);
- set_page_private(page + i, 0);
+ void *entry = xas_store(&xas, shadow);
+ VM_BUG_ON_PAGE(entry != folio, entry);
+ set_page_private(folio_page(folio, i), 0);
xas_next(&xas);
}
- ClearPageSwapCache(page);
+ folio_clear_swapcache(folio);
address_space->nrpages -= nr;
- __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
- ADD_CACHE_INFO(del_total, nr);
+ __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+ __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}
/**
- * add_to_swap - allocate swap space for a page
- * @page: page we want to move to swap
+ * add_to_swap - allocate swap space for a folio
+ * @folio: folio we want to move to swap
*
- * Allocate swap space for the page and add the page to the
- * swap cache. Caller needs to hold the page lock.
+ * Allocate swap space for the folio and add the folio to the
+ * swap cache.
+ *
+ * Context: Caller needs to hold the folio lock.
+ * Return: Whether the folio was added to the swap cache.
*/
-int add_to_swap(struct page *page)
+bool add_to_swap(struct folio *folio)
{
swp_entry_t entry;
int err;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(!PageUptodate(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
- entry = get_swap_page(page);
+ entry = folio_alloc_swap(folio);
if (!entry.val)
- return 0;
+ return false;
/*
* XArray node allocations from PF_MEMALLOC contexts could
@@ -208,8 +194,8 @@ int add_to_swap(struct page *page)
/*
* Add it to the swap cache.
*/
- err = add_to_swap_cache(page, entry,
- __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
+ err = add_to_swap_cache(folio, entry,
+ __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
if (err)
/*
* add_to_swap_cache() doesn't return -EEXIST, so we can safely
@@ -217,56 +203,88 @@ int add_to_swap(struct page *page)
*/
goto fail;
/*
- * Normally the page will be dirtied in unmap because its pte should be
- * dirty. A special case is MADV_FREE page. The page'e pte could have
- * dirty bit cleared but the page's SwapBacked bit is still set because
- * clearing the dirty bit and SwapBacked bit has no lock protected. For
- * such page, unmap will not set dirty bit for it, so page reclaim will
- * not write the page out. This can cause data corruption when the page
- * is swap in later. Always setting the dirty bit for the page solves
- * the problem.
+ * Normally the folio will be dirtied in unmap because its
+ * pte should be dirty. A special case is an MADV_FREE page: its
+ * pte could have the dirty bit cleared while the folio's
+ * SwapBacked flag is still set, because clearing the dirty bit
+ * and the SwapBacked flag is not protected by a lock. For such a
+ * folio, unmap will not set the dirty bit, so folio reclaim will
+ * not write the folio out. This can cause data corruption when
+ * the folio is swapped in later. Always setting the dirty flag
+ * for the folio solves the problem.
*/
- set_page_dirty(page);
+ folio_mark_dirty(folio);
- return 1;
+ return true;
fail:
- put_swap_page(page, entry);
- return 0;
+ put_swap_folio(folio, entry);
+ return false;
}
/*
- * This must be called only on pages that have
+ * This must be called only on folios that have
* been verified to be in the swap cache and locked.
- * It will never put the page into the free list,
- * the caller has a reference on the page.
+ * It will never put the folio into the free list,
+ * the caller has a reference on the folio.
*/
-void delete_from_swap_cache(struct page *page)
+void delete_from_swap_cache(struct folio *folio)
{
- swp_entry_t entry = { .val = page_private(page) };
+ swp_entry_t entry = folio_swap_entry(folio);
struct address_space *address_space = swap_address_space(entry);
xa_lock_irq(&address_space->i_pages);
- __delete_from_swap_cache(page, entry);
+ __delete_from_swap_cache(folio, entry, NULL);
xa_unlock_irq(&address_space->i_pages);
- put_swap_page(page, entry);
- page_ref_sub(page, hpage_nr_pages(page));
+ put_swap_folio(folio, entry);
+ folio_ref_sub(folio, folio_nr_pages(folio));
+}
+
+void clear_shadow_from_swap_cache(int type, unsigned long begin,
+ unsigned long end)
+{
+ unsigned long curr = begin;
+ void *old;
+
+ for (;;) {
+ swp_entry_t entry = swp_entry(type, curr);
+ struct address_space *address_space = swap_address_space(entry);
+ XA_STATE(xas, &address_space->i_pages, curr);
+
+ xa_lock_irq(&address_space->i_pages);
+ xas_for_each(&xas, old, end) {
+ if (!xa_is_value(old))
+ continue;
+ xas_store(&xas, NULL);
+ }
+ xa_unlock_irq(&address_space->i_pages);
+
+ /* search the next swapcache until we meet end */
+ curr >>= SWAP_ADDRESS_SPACE_SHIFT;
+ curr++;
+ curr <<= SWAP_ADDRESS_SPACE_SHIFT;
+ if (curr > end)
+ break;
+ }
}
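A worked example of the hop at the bottom of the loop, assuming SWAP_ADDRESS_SPACE_SHIFT == 14 (one swapper space per 16384 slots):

	/* with curr == 20000: */
	curr >>= SWAP_ADDRESS_SPACE_SHIFT;	/* 20000 >> 14 == 1 */
	curr++;					/* 2 */
	curr <<= SWAP_ADDRESS_SPACE_SHIFT;	/* 32768: first slot of the next space */

Each iteration thus clears the shadows in one swap address space, then jumps to the start of the following one until curr passes end.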
/*
* If we are the only user, then try to free up the swap cache.
*
- * Its ok to check for PageSwapCache without the page lock
+ * It's ok to check the swapcache flag without the folio lock
* here because we are going to recheck again inside
- * try_to_free_swap() _with_ the lock.
+ * folio_free_swap() _with_ the lock.
* - Marcelo
*/
-static inline void free_swap_cache(struct page *page)
+void free_swap_cache(struct page *page)
{
- if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
- try_to_free_swap(page);
- unlock_page(page);
+ struct folio *folio = page_folio(page);
+
+ if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
+ folio_trylock(folio)) {
+ folio_free_swap(folio);
+ folio_unlock(folio);
}
}
@@ -302,37 +320,35 @@ static inline bool swap_use_vma_readahead(void)
}
/*
- * Lookup a swap entry in the swap cache. A found page will be returned
+ * Lookup a swap entry in the swap cache. A found folio will be returned
* unlocked and with its refcount incremented - we rely on the kernel
- * lock getting page table operations atomic even if we drop the page
+ * lock getting page table operations atomic even if we drop the folio
* lock before returning.
*/
-struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
- unsigned long addr)
+struct folio *swap_cache_get_folio(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr)
{
- struct page *page;
+ struct folio *folio;
struct swap_info_struct *si;
si = get_swap_device(entry);
if (!si)
return NULL;
- page = find_get_page(swap_address_space(entry), swp_offset(entry));
+ folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
put_swap_device(si);
- INC_CACHE_INFO(find_total);
- if (page) {
+ if (folio) {
bool vma_ra = swap_use_vma_readahead();
bool readahead;
- INC_CACHE_INFO(find_success);
/*
* At the moment, we don't support PG_readahead for anon THP
* so let's bail out rather than confusing the readahead stat.
*/
- if (unlikely(PageTransCompound(page)))
- return page;
+ if (unlikely(folio_test_large(folio)))
+ return folio;
- readahead = TestClearPageReadahead(page);
+ readahead = folio_test_clear_readahead(folio);
if (vma && vma_ra) {
unsigned long ra_val;
int win, hits;
@@ -353,6 +369,43 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
}
}
+ return folio;
+}
+
+/**
+ * find_get_incore_page - Find and get a page from the page or swap caches.
+ * @mapping: The address_space to search.
+ * @index: The page cache index.
+ *
+ * This differs from find_get_page() in that it will also look for the
+ * page in the swap cache.
+ *
+ * Return: The found page or %NULL.
+ */
+struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
+{
+ swp_entry_t swp;
+ struct swap_info_struct *si;
+ struct page *page = pagecache_get_page(mapping, index,
+ FGP_ENTRY | FGP_HEAD, 0);
+
+ if (!page)
+ return page;
+ if (!xa_is_value(page))
+ return find_subpage(page, index);
+ if (!shmem_mapping(mapping))
+ return NULL;
+
+ swp = radix_to_swp_entry(page);
+ /* There might be swapin error entries in shmem mapping. */
+ if (non_swap_entry(swp))
+ return NULL;
+ /* Prevent swapoff from happening to us */
+ si = get_swap_device(swp);
+ if (!si)
+ return NULL;
+ page = find_get_page(swap_address_space(swp), swp_offset(swp));
+ put_swap_device(si);
return page;
}
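A hedged caller sketch for find_get_incore_page(), modeled on a mincore()-style presence check; the helper name page_is_incore() is hypothetical:

	static unsigned char page_is_incore(struct address_space *mapping,
					    pgoff_t index)
	{
		struct page *page = find_get_incore_page(mapping, index);
		unsigned char present = 0;

		if (page) {
			present = PageUptodate(page);
			put_page(page);	/* drop the reference the lookup took */
		}
		return present;
	}
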
@@ -360,25 +413,27 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
bool *new_page_allocated)
{
- struct page *found_page = NULL, *new_page = NULL;
struct swap_info_struct *si;
- int err;
+ struct folio *folio;
+ void *shadow = NULL;
+
*new_page_allocated = false;
- do {
+ for (;;) {
+ int err;
/*
* First check the swap cache. Since this is normally
- * called after lookup_swap_cache() failed, re-calling
+ * called after swap_cache_get_folio() failed, re-calling
* that would confuse statistics.
*/
si = get_swap_device(entry);
if (!si)
- break;
- found_page = find_get_page(swap_address_space(entry),
- swp_offset(entry));
+ return NULL;
+ folio = filemap_get_folio(swap_address_space(entry),
+ swp_offset(entry));
put_swap_device(si);
- if (found_page)
- break;
+ if (folio)
+ return folio_file_page(folio, swp_offset(entry));
/*
* Just skip read ahead for unused swap slot.
@@ -389,54 +444,67 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* else swap_off will be aborted if we return NULL.
*/
if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
- break;
+ return NULL;
/*
- * Get a new page to read into from swap.
+ * Get a new page to read into from swap. Allocate it now,
+ * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
+ * cause any racers to loop around until we add it to cache.
*/
- if (!new_page) {
- new_page = alloc_page_vma(gfp_mask, vma, addr);
- if (!new_page)
- break; /* Out of memory */
- }
+ folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
+ if (!folio)
+ return NULL;
/*
* Swap entry may have been freed since our caller observed it.
*/
err = swapcache_prepare(entry);
- if (err == -EEXIST) {
- /*
- * We might race against get_swap_page() and stumble
- * across a SWAP_HAS_CACHE swap_map entry whose page
- * has not been brought into the swapcache yet.
- */
- cond_resched();
- continue;
- } else if (err) /* swp entry is obsolete ? */
+ if (!err)
break;
- /* May fail (-ENOMEM) if XArray node allocation failed. */
- __SetPageLocked(new_page);
- __SetPageSwapBacked(new_page);
- err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
- if (likely(!err)) {
- /* Initiate read into locked page */
- SetPageWorkingset(new_page);
- lru_cache_add_anon(new_page);
- *new_page_allocated = true;
- return new_page;
- }
- __ClearPageLocked(new_page);
+ folio_put(folio);
+ if (err != -EEXIST)
+ return NULL;
+
/*
- * add_to_swap_cache() doesn't return -EEXIST, so we can safely
- * clear SWAP_HAS_CACHE flag.
+ * We might race against __delete_from_swap_cache(), and
+ * stumble across a swap_map entry whose SWAP_HAS_CACHE
+ * has not yet been cleared. Or race against another
+ * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
+ * in swap_map, but not yet added its page to swap cache.
*/
- put_swap_page(new_page, entry);
- } while (err != -ENOMEM);
+ schedule_timeout_uninterruptible(1);
+ }
- if (new_page)
- put_page(new_page);
- return found_page;
+ /*
+ * The swap entry is ours to swap in. Prepare the new page.
+ */
+
+ __folio_set_locked(folio);
+ __folio_set_swapbacked(folio);
+
+ if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
+ goto fail_unlock;
+
+ /* May fail (-ENOMEM) if XArray node allocation failed. */
+ if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+ goto fail_unlock;
+
+ mem_cgroup_swapin_uncharge_swap(entry);
+
+ if (shadow)
+ workingset_refault(folio, shadow);
+
+ /* Caller will initiate read into locked folio */
+ folio_add_lru(folio);
+ *new_page_allocated = true;
+ return &folio->page;
+
+fail_unlock:
+ put_swap_folio(folio, entry);
+ folio_unlock(folio);
+ folio_put(folio);
+ return NULL;
}
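To summarize the swapcache_prepare() handshake in the allocation loop above:

	/*
	 * err == 0:       we set SWAP_HAS_CACHE and own the swapin;
	 *                 fall out of the loop and insert the folio.
	 * err == -EEXIST: someone else holds SWAP_HAS_CACHE (a racing
	 *                 swapin, or a deletion that has not finished);
	 *                 sleep a tick and retry.
	 * other err:      the swap entry was freed under us; give up.
	 */
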
/*
@@ -446,14 +514,16 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* the swap entry is no longer in use.
*/
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_area_struct *vma, unsigned long addr, bool do_poll)
+ struct vm_area_struct *vma,
+ unsigned long addr, bool do_poll,
+ struct swap_iocb **plug)
{
bool page_was_allocated;
struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
vma, addr, &page_was_allocated);
if (page_was_allocated)
- swap_readpage(retpage, do_poll);
+ swap_readpage(retpage, do_poll, plug);
return retpage;
}
@@ -509,10 +579,11 @@ static unsigned long swapin_nr_pages(unsigned long offset)
return 1;
hits = atomic_xchg(&swapin_readahead_hits, 0);
- pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
+ pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
+ max_pages,
atomic_read(&last_readahead_pages));
if (!hits)
- prev_offset = offset;
+ WRITE_ONCE(prev_offset, offset);
atomic_set(&last_readahead_pages, pages);
return pages;
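Why the READ_ONCE()/WRITE_ONCE() pair above, in short:

	/*
	 * prev_offset is read and written by concurrent faulters with no
	 * lock held.  READ_ONCE()/WRITE_ONCE() keep the compiler from
	 * tearing or refetching the accesses; they impose no memory
	 * ordering, which is acceptable for a readahead heuristic.
	 */
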
@@ -534,7 +605,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
* This has been extended to use the NUMA policies from the mm triggering
* the readahead.
*
- * Caller must hold read mmap_sem if vmf->vma is not NULL.
+ * Caller must hold read mmap_lock if vmf->vma is not NULL.
*/
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
@@ -546,6 +617,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
unsigned long mask;
struct swap_info_struct *si = swp_swap_info(entry);
struct blk_plug plug;
+ struct swap_iocb *splug = NULL;
bool do_poll = true, page_allocated;
struct vm_area_struct *vma = vmf->vma;
unsigned long addr = vmf->address;
@@ -554,13 +626,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
if (!mask)
goto skip;
- /* Test swap type to make sure the dereference is safe */
- if (likely(si->flags & (SWP_BLKDEV | SWP_FS))) {
- struct inode *inode = si->swap_file->f_mapping->host;
- if (inode_read_congested(inode))
- goto skip;
- }
-
do_poll = false;
/* Read a page_cluster sized and aligned cluster around offset. */
start_offset = offset & ~mask;
@@ -579,7 +644,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
if (!page)
continue;
if (page_allocated) {
- swap_readpage(page, false);
+ swap_readpage(page, false, &splug);
if (offset != entry_offset) {
SetPageReadahead(page);
count_vm_event(SWAP_RA);
@@ -588,10 +653,12 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
put_page(page);
}
blk_finish_plug(&plug);
+ swap_read_unplug(splug);
lru_add_drain(); /* Push any new pages onto the LRU now */
skip:
- return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
+ /* The page was likely read above, so no need for plugging here */
+ return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}
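A worked example of the cluster window, with mask taken from swapin_nr_pages(offset) - 1 earlier in the function:

	/*
	 * offset == 0x213 and swapin_nr_pages() == 8, so mask == 0x7:
	 *
	 *	start_offset = offset & ~mask;	== 0x210
	 *	end_offset   = offset |  mask;	== 0x217
	 *
	 * i.e. an aligned 8-slot cluster containing the faulting entry.
	 */
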
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -619,7 +686,12 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
void exit_swap_address_space(unsigned int type)
{
- kvfree(swapper_spaces[type]);
+ int i;
+ struct address_space *spaces = swapper_spaces[type];
+
+ for (i = 0; i < nr_swapper_spaces[type]; i++)
+ VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
+ kvfree(spaces);
nr_swapper_spaces[type] = 0;
swapper_spaces[type] = NULL;
}
@@ -642,7 +714,6 @@ static void swap_ra_info(struct vm_fault *vmf,
{
struct vm_area_struct *vma = vmf->vma;
unsigned long ra_val;
- swp_entry_t entry;
unsigned long faddr, pfn, fpfn;
unsigned long start, end;
pte_t *pte, *orig_pte;
@@ -660,11 +731,6 @@ static void swap_ra_info(struct vm_fault *vmf,
faddr = vmf->address;
orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
- entry = pte_to_swp_entry(*pte);
- if ((unlikely(non_swap_entry(entry)))) {
- pte_unmap(orig_pte);
- return;
- }
fpfn = PFN_DOWN(faddr);
ra_val = GET_SWAP_RA_VAL(vma);
@@ -707,29 +773,32 @@ static void swap_ra_info(struct vm_fault *vmf,
/**
* swap_vma_readahead - swap in pages in hope we need them soon
- * @entry: swap entry of this memory
+ * @fentry: swap entry of this memory
* @gfp_mask: memory allocation flags
* @vmf: fault information
*
* Returns the struct page for entry and addr, after queueing swapin.
*
- * Primitive swap readahead code. We simply read in a few pages whoes
+ * Primitive swap readahead code. We simply read in a few pages whose
* virtual addresses are around the fault address in the same vma.
*
- * Caller must hold read mmap_sem if vmf->vma is not NULL.
+ * Caller must hold read mmap_lock if vmf->vma is not NULL.
*
*/
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct blk_plug plug;
+ struct swap_iocb *splug = NULL;
struct vm_area_struct *vma = vmf->vma;
struct page *page;
pte_t *pte, pentry;
swp_entry_t entry;
unsigned int i;
bool page_allocated;
- struct vma_swap_readahead ra_info = {0,};
+ struct vma_swap_readahead ra_info = {
+ .win = 1,
+ };
swap_ra_info(vmf, &ra_info);
if (ra_info.win == 1)
@@ -739,9 +808,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
i++, pte++) {
pentry = *pte;
- if (pte_none(pentry))
- continue;
- if (pte_present(pentry))
+ if (!is_swap_pte(pentry))
continue;
entry = pte_to_swp_entry(pentry);
if (unlikely(non_swap_entry(entry)))
@@ -751,7 +818,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
if (!page)
continue;
if (page_allocated) {
- swap_readpage(page, false);
+ swap_readpage(page, false, &splug);
if (i != ra_info.offset) {
SetPageReadahead(page);
count_vm_event(SWAP_RA);
@@ -760,10 +827,12 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
put_page(page);
}
blk_finish_plug(&plug);
+ swap_read_unplug(splug);
lru_add_drain();
skip:
+ /* The page was likely read above, so no need for plugging here */
return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
- ra_info.win == 1);
+ ra_info.win == 1, NULL);
}
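For reference, is_swap_pte() from include/linux/swapops.h folds exactly the two checks the old loop performed:

	static inline int is_swap_pte(pte_t pte)
	{
		return !pte_none(pte) && !pte_present(pte);
	}
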
/**
@@ -790,31 +859,29 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
+ return sysfs_emit(buf, "%s\n",
+ enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
- enable_vma_readahead = true;
- else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
- enable_vma_readahead = false;
- else
- return -EINVAL;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &enable_vma_readahead);
+ if (ret)
+ return ret;
return count;
}
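Note that kstrtobool() decides on the leading character(s), so the set of accepted strings changes slightly:

	/*
	 * Accepted by kstrtobool() (first characters decide):
	 *	"1", "y", "Y", "on"  -> true
	 *	"0", "n", "N", "off" -> false
	 * "true"/"false", which the old parser accepted, now get -EINVAL.
	 */

The knob itself lives at /sys/kernel/mm/swap/vma_ra_enabled, as registered by this file's swap_init_sysfs().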
-static struct kobj_attribute vma_ra_enabled_attr =
- __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
- vma_ra_enabled_store);
+static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
static struct attribute *swap_attrs[] = {
&vma_ra_enabled_attr.attr,
NULL,
};
-static struct attribute_group swap_attr_group = {
+static const struct attribute_group swap_attr_group = {
.attrs = swap_attrs,
};