author		David Hildenbrand <david@redhat.com>	2022-03-24 18:13:50 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-24 19:06:51 -0700
commit		03104c2c5db8918030788e607e4af980b2f42bb3 (patch)
tree		c9b7754ad398baeaa9c310a5b031c1bd5baf4103 /mm/swapfile.c
parent		mm/khugepaged: remove reuse_swap_page() usage (diff)
mm/swapfile: remove stale reuse_swap_page()
All users are gone, let's remove it.  We'll let SWP_STABLE_WRITES stick
around for now, as it might come in handy in the near future.

Link: https://lkml.kernel.org/r/20220131162940.210846-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Don Dutile <ddutile@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Liang Zhang <zhangliang5@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
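For reference, a condensed sketch of the check the removed helper performed in the common (base-page) case, written against the removed code shown in the diff below. It is illustrative only, not something to build on top of current kernels; page_mapcount() stands in for the removed THP-aware counting, and reuse_swap_page_sketch is a made-up name.

/*
 * Sketch (not kernel code): an anon page may be written in place, skipping
 * copy-on-write, only when the sum of its map count and swap count shows no
 * other reference.  As a side effect its stale swap slot can be dropped,
 * unless writeback to a SWP_STABLE_WRITES device is still in flight.
 */
static bool reuse_swap_page_sketch(struct page *page)
{
	int count;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (unlikely(PageKsm(page)))
		return false;

	count = page_mapcount(page);	/* stand-in for the THP-aware count */
	if (PageSwapCache(page))
		count += page_swapcount(page);

	if (count == 1 && PageSwapCache(page)) {
		if (!PageWriteback(page)) {
			/* Old content on disk will never be read again. */
			delete_from_swap_cache(page);
			SetPageDirty(page);
		} else {
			swp_entry_t entry = { .val = page_private(page) };
			struct swap_info_struct *p = swap_info_get(entry);

			/* Device may still be writing out the old data. */
			if (p->flags & SWP_STABLE_WRITES) {
				spin_unlock(&p->lock);
				return false;
			}
			spin_unlock(&p->lock);
		}
	}
	return count <= 1;
}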
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	104
1 file changed, 0 insertions, 104 deletions
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 33c7abb16610..63c61f8b2611 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1167,16 +1167,6 @@ out:
 	return NULL;
 }
 
-static struct swap_info_struct *swap_info_get(swp_entry_t entry)
-{
-	struct swap_info_struct *p;
-
-	p = _swap_info_get(entry);
-	if (p)
-		spin_lock(&p->lock);
-	return p;
-}
-
 static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
 					struct swap_info_struct *q)
 {
@@ -1601,100 +1591,6 @@ static bool page_swapped(struct page *page)
 	return false;
 }
 
-static int page_trans_huge_map_swapcount(struct page *page,
-					 int *total_swapcount)
-{
-	int i, map_swapcount, _total_swapcount;
-	unsigned long offset = 0;
-	struct swap_info_struct *si;
-	struct swap_cluster_info *ci = NULL;
-	unsigned char *map = NULL;
-	int swapcount = 0;
-
-	/* hugetlbfs shouldn't call it */
-	VM_BUG_ON_PAGE(PageHuge(page), page);
-
-	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
-		if (PageSwapCache(page))
-			swapcount = page_swapcount(page);
-		if (total_swapcount)
-			*total_swapcount = swapcount;
-		return swapcount + page_trans_huge_mapcount(page);
-	}
-
-	page = compound_head(page);
-
-	_total_swapcount = map_swapcount = 0;
-	if (PageSwapCache(page)) {
-		swp_entry_t entry;
-
-		entry.val = page_private(page);
-		si = _swap_info_get(entry);
-		if (si) {
-			map = si->swap_map;
-			offset = swp_offset(entry);
-		}
-	}
-	if (map)
-		ci = lock_cluster(si, offset);
-	for (i = 0; i < HPAGE_PMD_NR; i++) {
-		int mapcount = atomic_read(&page[i]._mapcount) + 1;
-		if (map) {
-			swapcount = swap_count(map[offset + i]);
-			_total_swapcount += swapcount;
-		}
-		map_swapcount = max(map_swapcount, mapcount + swapcount);
-	}
-	unlock_cluster(ci);
-
-	if (PageDoubleMap(page))
-		map_swapcount -= 1;
-
-	if (total_swapcount)
-		*total_swapcount = _total_swapcount;
-
-	return map_swapcount + compound_mapcount(page);
-}
-
-/*
- * We can write to an anon page without COW if there are no other references
- * to it. And as a side-effect, free up its swap: because the old content
- * on disk will never be read, and seeking back there to write new content
- * later would only waste time away from clustering.
- */
-bool reuse_swap_page(struct page *page)
-{
-	int count, total_swapcount;
-
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	if (unlikely(PageKsm(page)))
-		return false;
-	count = page_trans_huge_map_swapcount(page, &total_swapcount);
-	if (count == 1 && PageSwapCache(page) &&
-	    (likely(!PageTransCompound(page)) ||
-	     /* The remaining swap count will be freed soon */
-	     total_swapcount == page_swapcount(page))) {
-		if (!PageWriteback(page)) {
-			page = compound_head(page);
-			delete_from_swap_cache(page);
-			SetPageDirty(page);
-		} else {
-			swp_entry_t entry;
-			struct swap_info_struct *p;
-
-			entry.val = page_private(page);
-			p = swap_info_get(entry);
-			if (p->flags & SWP_STABLE_WRITES) {
-				spin_unlock(&p->lock);
-				return false;
-			}
-			spin_unlock(&p->lock);
-		}
-	}
-
-	return count <= 1;
-}
-
 /*
  * If swap is getting full, or if there are no more mappings of this page,
  * then try_to_free_swap is called to free its swap space.