author     Matthew Wilcox (Oracle) <willy@infradead.org>  2022-01-21 11:27:31 -0500
committer  Matthew Wilcox (Oracle) <willy@infradead.org>  2022-03-21 12:59:03 -0400
commit     b3ac04132c4b9bc5c9c14608424d410e7ca3b400
tree       8cb5d7604e0a6c332fa49291a9fcbc04823b4564 /mm/vmscan.c
parent     mm/mlock: Add mlock_vma_folio()
mm/rmap: Turn page_referenced() into folio_referenced()
Both its callers pass a page which was previously on an LRU list, so they were passing a folio by definition. Use the type system to enforce that and remove a few calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
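[Editor's illustration] To show the conversion pattern the patch applies, here is a minimal userspace sketch of the idea. The page/folio structs, page_folio() and folio_referenced() below are simplified stand-ins, not the kernel's real definitions. The point is that a struct folio * parameter proves the caller already resolved any tail page, so the callee no longer needs a defensive compound_head() call:

#include <stdio.h>

/* Simplified stand-ins for the kernel types (illustrative only). */
struct page {
	struct page *compound_head;	/* head page of the compound page */
	int referenced;
};

/* A folio is, by definition, a head page: it can never be a tail page. */
struct folio {
	struct page page;
};

/* Model of page_folio(): resolve any page, head or tail, to its folio. */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)page->compound_head;
}

/*
 * Taking struct folio * means the caller has already resolved tail
 * pages, so no compound_head() call is needed inside the function.
 */
static int folio_referenced(struct folio *folio)
{
	return folio->page.referenced;
}

int main(void)
{
	struct page head = { .compound_head = &head, .referenced = 1 };
	struct page tail = { .compound_head = &head, .referenced = 0 };

	/* Head and tail pages resolve to the same folio before the call. */
	printf("%d\n", folio_referenced(page_folio(&head)));	/* prints 1 */
	printf("%d\n", folio_referenced(page_folio(&tail)));	/* prints 1 */
	return 0;
}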
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 94729d2d1125..38f124c41bcd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1386,11 +1386,12 @@ enum page_references {
 static enum page_references page_check_references(struct page *page,
 						  struct scan_control *sc)
 {
+	struct folio *folio = page_folio(page);
 	int referenced_ptes, referenced_page;
 	unsigned long vm_flags;
 
-	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
-					  &vm_flags);
+	referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
+					   &vm_flags);
 	referenced_page = TestClearPageReferenced(page);
 
 	/*
@@ -2490,7 +2491,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  *
  * If the pages are mostly unmapped, the processing is fast and it is
  * appropriate to hold lru_lock across the whole operation. But if
- * the pages are mapped, the processing is slow (page_referenced()), so
+ * the pages are mapped, the processing is slow (folio_referenced()), so
  * we should drop lru_lock around each page. It's impossible to balance
  * this, so instead we remove the pages from the LRU while processing them.
  * It is safe to rely on PG_active against the non-LRU pages in here because
@@ -2510,7 +2511,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_active);
 	LIST_HEAD(l_inactive);
-	struct page *page;
 	unsigned nr_deactivate, nr_activate;
 	unsigned nr_rotated = 0;
 	int file = is_file_lru(lru);
@@ -2532,9 +2532,13 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	spin_unlock_irq(&lruvec->lru_lock);
 
 	while (!list_empty(&l_hold)) {
+		struct folio *folio;
+		struct page *page;
+
 		cond_resched();
-		page = lru_to_page(&l_hold);
-		list_del(&page->lru);
+		folio = lru_to_folio(&l_hold);
+		list_del(&folio->lru);
+		page = &folio->page;
 
 		if (unlikely(!page_evictable(page))) {
 			putback_lru_page(page);
@@ -2549,8 +2553,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
 			}
 		}
 
-		if (page_referenced(page, 0, sc->target_mem_cgroup,
-				    &vm_flags)) {
+		if (folio_referenced(folio, 0, sc->target_mem_cgroup,
+				     &vm_flags)) {
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So
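
[Editor's illustration] The comment touched by the second hunk explains why shrink_active_list() snips pages off the LRU before processing them: folio_referenced() is slow for mapped pages, so lru_lock cannot reasonably be held across the whole loop. Here is a minimal userspace sketch of that lock-dropping pattern, assuming a hypothetical item type and a pthread mutex standing in for lru_lock and the LRU lists; the kernel additionally sorts the items back onto l_active/l_inactive afterwards, which this sketch omits:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for a page/folio on an LRU list. */
struct item {
	struct item *next;
	int data;
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *lru_list;	/* shared list, protected by lru_lock */

/* Stand-in for the slow per-item work (folio_referenced() in vmscan). */
static void process_slowly(struct item *it)
{
	printf("processing %d\n", it->data);
}

static void shrink_list(void)
{
	struct item *hold;

	/* Snip the items onto a private list in one short critical section. */
	pthread_mutex_lock(&lru_lock);
	hold = lru_list;
	lru_list = NULL;
	pthread_mutex_unlock(&lru_lock);

	/* The slow work runs with the lock dropped, as the comment describes. */
	while (hold) {
		struct item *it = hold;

		hold = it->next;
		process_slowly(it);
	}
}

int main(void)
{
	struct item a = { .next = NULL, .data = 1 };
	struct item b = { .next = &a, .data = 2 };

	lru_list = &b;
	shrink_list();
	return 0;
}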