author		Matthew Wilcox (Oracle) <willy@infradead.org>	2022-05-12 20:23:02 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2022-05-13 07:20:15 -0700
commit		09c02e56327bdaf9cdbb2742a35fb8c6a6f9a6c7 (patch)
tree		fd773c9b742b471f189984aca2da30a7d038afba
parent		swap: turn get_swap_page() into folio_alloc_swap() (diff)
swap: convert add_to_swap() to take a folio
The only caller already has a folio available, so this saves a conversion.  Also convert the return type to boolean.

Link: https://lkml.kernel.org/r/20220504182857.4013401-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	mm/swap.h	6
-rw-r--r--	mm/swap_state.c	47
-rw-r--r--	mm/vmscan.c	6
3 files changed, 31 insertions, 28 deletions
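
For illustration only (not part of the patch): a minimal sketch of how a caller that already holds a locked folio would use the converted interface, modeled on the mm/vmscan.c hunk below.  try_swap_out() and its control flow are hypothetical; add_to_swap(), folio_test_locked(), folio_test_large() and VM_BUG_ON_FOLIO() are the real interfaces shown in this patch.

	/*
	 * Illustrative sketch, not from this patch: the caller already has a
	 * struct folio, so no page_folio() conversion is needed, and the
	 * result is now a bool rather than 0/1.
	 */
	static bool try_swap_out(struct folio *folio)
	{
		/* add_to_swap() requires the folio lock to be held. */
		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

		if (!add_to_swap(folio)) {
			/*
			 * Only a large folio is worth splitting and retrying,
			 * mirroring the shrink_page_list() fallback below.
			 */
			if (!folio_test_large(folio))
				return false;
			/* ... split the folio, then retry add_to_swap(folio) ... */
		}
		return true;
	}
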
diff --git a/mm/swap.h b/mm/swap.h
index a6da8f612904..0193797b0c92 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -32,7 +32,7 @@ extern struct address_space *swapper_spaces[];
>> SWAP_ADDRESS_SPACE_SHIFT])
void show_swap_cache_info(void);
-int add_to_swap(struct page *page);
+bool add_to_swap(struct folio *folio);
void *get_shadow_from_swap_cache(swp_entry_t entry);
int add_to_swap_cache(struct page *page, swp_entry_t entry,
gfp_t gfp, void **shadowp);
@@ -119,9 +119,9 @@ struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
return find_get_page(mapping, index);
}
-static inline int add_to_swap(struct page *page)
+static inline bool add_to_swap(struct folio *folio)
{
- return 0;
+ return false;
}
static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 2dc0d02f8d79..416aaaa8a7ed 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -176,24 +176,26 @@ void __delete_from_swap_cache(struct page *page,
}
/**
- * add_to_swap - allocate swap space for a page
- * @page: page we want to move to swap
+ * add_to_swap - allocate swap space for a folio
+ * @folio: folio we want to move to swap
*
- * Allocate swap space for the page and add the page to the
- * swap cache. Caller needs to hold the page lock.
+ * Allocate swap space for the folio and add the folio to the
+ * swap cache.
+ *
+ * Context: Caller needs to hold the folio lock.
+ * Return: Whether the folio was added to the swap cache.
*/
-int add_to_swap(struct page *page)
+bool add_to_swap(struct folio *folio)
{
- struct folio *folio = page_folio(page);
swp_entry_t entry;
int err;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(!PageUptodate(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
entry = folio_alloc_swap(folio);
if (!entry.val)
- return 0;
+ return false;
/*
* XArray node allocations from PF_MEMALLOC contexts could
@@ -206,7 +208,7 @@ int add_to_swap(struct page *page)
/*
* Add it to the swap cache.
*/
- err = add_to_swap_cache(page, entry,
+ err = add_to_swap_cache(&folio->page, entry,
__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
if (err)
/*
@@ -215,22 +217,23 @@ int add_to_swap(struct page *page)
*/
goto fail;
/*
- * Normally the page will be dirtied in unmap because its pte should be
- * dirty. A special case is MADV_FREE page. The page's pte could have
- * dirty bit cleared but the page's SwapBacked bit is still set because
- * clearing the dirty bit and SwapBacked bit has no lock protected. For
- * such page, unmap will not set dirty bit for it, so page reclaim will
- * not write the page out. This can cause data corruption when the page
- * is swap in later. Always setting the dirty bit for the page solves
- * the problem.
+ * Normally the folio will be dirtied in unmap because its
+ * pte should be dirty. A special case is MADV_FREE page. The
+ * page's pte could have dirty bit cleared but the folio's
+ * SwapBacked flag is still set because clearing the dirty bit
+ * and SwapBacked flag has no lock protected. For such folio,
+ * unmap will not set dirty bit for it, so folio reclaim will
+ * not write the folio out. This can cause data corruption when
+ * the folio is swapped in later. Always setting the dirty flag
+ * for the folio solves the problem.
*/
- set_page_dirty(page);
+ folio_mark_dirty(folio);
- return 1;
+ return true;
fail:
- put_swap_page(page, entry);
- return 0;
+ put_swap_page(&folio->page, entry);
+ return false;
}
/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2837e7e3677c..f2fc5aa38cb8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1731,8 +1731,8 @@ retry:
page_list))
goto activate_locked;
}
- if (!add_to_swap(page)) {
- if (!PageTransHuge(page))
+ if (!add_to_swap(folio)) {
+ if (!folio_test_large(folio))
goto activate_locked_split;
/* Fallback to swap normal pages */
if (split_folio_to_list(folio,
@@ -1741,7 +1741,7 @@ retry:
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_vm_event(THP_SWPOUT_FALLBACK);
#endif
- if (!add_to_swap(page))
+ if (!add_to_swap(folio))
goto activate_locked_split;
}