Diffstat
-rw-r--r-- | mm/page_idle.c | 58
1 file changed, 20 insertions(+), 38 deletions(-)
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 295512465065..bc08332a609c 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -4,6 +4,7 @@
 #include <linux/fs.h>
 #include <linux/sysfs.h>
 #include <linux/kobject.h>
+#include <linux/memory_hotplug.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/pagemap.h>
@@ -12,6 +13,8 @@
 #include <linux/page_ext.h>
 #include <linux/page_idle.h>
 
+#include "internal.h"
+
 #define BITMAP_CHUNK_SIZE	sizeof(u64)
 #define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
 
@@ -30,36 +33,24 @@
  */
 static struct page *page_idle_get_page(unsigned long pfn)
 {
-	struct page *page;
-	pg_data_t *pgdat;
-
-	if (!pfn_valid(pfn))
-		return NULL;
+	struct page *page = pfn_to_online_page(pfn);
 
-	page = pfn_to_page(pfn);
 	if (!page || !PageLRU(page) ||
 	    !get_page_unless_zero(page))
 		return NULL;
 
-	pgdat = page_pgdat(page);
-	spin_lock_irq(&pgdat->lru_lock);
 	if (unlikely(!PageLRU(page))) {
 		put_page(page);
 		page = NULL;
 	}
-	spin_unlock_irq(&pgdat->lru_lock);
 	return page;
 }
 
-static bool page_idle_clear_pte_refs_one(struct page *page,
+static bool page_idle_clear_pte_refs_one(struct folio *folio,
 					struct vm_area_struct *vma,
 					unsigned long addr, void *arg)
 {
-	struct page_vma_mapped_walk pvmw = {
-		.page = page,
-		.vma = vma,
-		.address = addr,
-	};
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 	bool referenced = false;
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -81,41 +72,42 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
 	}
 
 	if (referenced) {
-		clear_page_idle(page);
+		folio_clear_idle(folio);
 		/*
 		 * We cleared the referenced bit in a mapping to this page. To
 		 * avoid interference with page reclaim, mark it young so that
-		 * page_referenced() will return > 0.
+		 * folio_referenced() will return > 0.
 		 */
-		set_page_young(page);
+		folio_set_young(folio);
 	}
 	return true;
 }
 
 static void page_idle_clear_pte_refs(struct page *page)
 {
+	struct folio *folio = page_folio(page);
+
 	/*
-	 * Since rwc.arg is unused, rwc is effectively immutable, so we
-	 * can make it static const to save some cycles and stack.
+	 * Since rwc.try_lock is unused, rwc is effectively immutable, so we
+	 * can make it static to save some cycles and stack.
 	 */
-	static const struct rmap_walk_control rwc = {
+	static struct rmap_walk_control rwc = {
 		.rmap_one = page_idle_clear_pte_refs_one,
-		.anon_lock = page_lock_anon_vma_read,
+		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
 
-	if (!page_mapped(page) ||
-	    !page_rmapping(page))
+	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
 		return;
 
-	need_lock = !PageAnon(page) || PageKsm(page);
-	if (need_lock && !trylock_page(page))
+	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+	if (need_lock && !folio_trylock(folio))
 		return;
 
-	rmap_walk(page, (struct rmap_walk_control *)&rwc);
+	rmap_walk(folio, &rwc);
 
 	if (need_lock)
-		unlock_page(page);
+		folio_unlock(folio);
 }
 
 static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
@@ -214,16 +206,6 @@ static const struct attribute_group page_idle_attr_group = {
 	.name = "page_idle",
 };
 
-#ifndef CONFIG_64BIT
-static bool need_page_idle(void)
-{
-	return true;
-}
-struct page_ext_operations page_idle_ops = {
-	.need = need_page_idle,
-};
-#endif
-
 static int __init page_idle_init(void)
 {
 	int err;
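For readability, this is roughly how page_idle_get_page() reads once the hunks above are applied. It is a sketch reconstructed from the diff, not a verbatim copy of the resulting file: pfn_to_online_page() (hence the new linux/memory_hotplug.h include) folds the old pfn_valid() check and pfn_to_page() conversion into a single lookup that also returns NULL for pfns in offline memory sections, and the PageLRU() recheck no longer takes the per-node lru_lock round-trip that the old code used.

/* Sketch of the post-patch helper, reconstructed from the hunks above. */
static struct page *page_idle_get_page(unsigned long pfn)
{
	/*
	 * NULL for invalid pfns and for pfns whose memory section is
	 * offline; replaces the explicit pfn_valid()/pfn_to_page() pair.
	 */
	struct page *page = pfn_to_online_page(pfn);

	if (!page || !PageLRU(page) ||
	    !get_page_unless_zero(page))
		return NULL;

	/* Recheck PageLRU() after taking a reference, without lru_lock. */
	if (unlikely(!PageLRU(page))) {
		put_page(page);
		page = NULL;
	}
	return page;
}

The remaining hunks are the matching folio conversion: page_idle_clear_pte_refs_one() now walks the mapping with DEFINE_FOLIO_VMA_WALK() and uses the folio_* flag helpers, while page_idle_clear_pte_refs() derives the folio with page_folio() before handing it to rmap_walk().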