From 9f100e3b37590828ae23b0210ee634d14b28b8e8 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 5 Apr 2024 16:32:27 +0100
Subject: mm: convert free_zone_device_page to free_zone_device_folio

Both callers already have a folio; pass it in and save a few calls to
compound_head().

Link: https://lkml.kernel.org/r/20240405153228.2563754-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Reviewed-by: Zi Yan
Signed-off-by: Andrew Morton
---
 mm/memremap.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)
(limited to 'mm/memremap.c')

diff --git a/mm/memremap.c b/mm/memremap.c
index 9e9fb1972fff..e1776693e2ea 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -456,21 +456,23 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 }
 EXPORT_SYMBOL_GPL(get_dev_pagemap);
 
-void free_zone_device_page(struct page *page)
+void free_zone_device_folio(struct folio *folio)
 {
-	if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
+	if (WARN_ON_ONCE(!folio->page.pgmap->ops ||
+			!folio->page.pgmap->ops->page_free))
 		return;
 
-	mem_cgroup_uncharge(page_folio(page));
+	mem_cgroup_uncharge(folio);
 
 	/*
 	 * Note: we don't expect anonymous compound pages yet. Once supported
 	 * and we could PTE-map them similar to THP, we'd have to clear
 	 * PG_anon_exclusive on all tail pages.
 	 */
-	VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
-	if (PageAnon(page))
-		__ClearPageAnonExclusive(page);
+	if (folio_test_anon(folio)) {
+		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+		__ClearPageAnonExclusive(folio_page(folio, 0));
+	}
 
 	/*
 	 * When a device managed page is freed, the folio->mapping field
@@ -481,20 +483,20 @@ void free_zone_device_page(struct page *page)
 	 *
 	 * For other types of ZONE_DEVICE pages, migration is either
 	 * handled differently or not done at all, so there is no need
-	 * to clear page->mapping.
+	 * to clear folio->mapping.
 	 */
-	page->mapping = NULL;
-	page->pgmap->ops->page_free(page);
+	folio->mapping = NULL;
+	folio->page.pgmap->ops->page_free(folio_page(folio, 0));
 
-	if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
-	    page->pgmap->type != MEMORY_DEVICE_COHERENT)
+	if (folio->page.pgmap->type != MEMORY_DEVICE_PRIVATE &&
+	    folio->page.pgmap->type != MEMORY_DEVICE_COHERENT)
 		/*
-		 * Reset the page count to 1 to prepare for handing out the page
+		 * Reset the refcount to 1 to prepare for handing out the page
 		 * again.
 		 */
-		set_page_count(page, 1);
+		folio_set_count(folio, 1);
 	else
-		put_dev_pagemap(page->pgmap);
+		put_dev_pagemap(folio->page.pgmap);
 }
 
 void zone_device_page_init(struct page *page)
--
cgit v1.2.3-59-g8ed1b

From 53e45c4f6d4f6cd7e62f4cb016018ba31c2ac8b4 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 24 Apr 2024 20:19:08 +0100
Subject: mm: convert put_devmap_managed_page_refs() to
 put_devmap_managed_folio_refs()

All callers have a folio so we can remove this use of
page_ref_sub_return().
Link: https://lkml.kernel.org/r/20240424191914.361554-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Signed-off-by: Andrew Morton
---
 include/linux/mm.h | 12 ++++++------
 mm/gup.c           |  6 +++---
 mm/memremap.c      | 10 +++++-----
 mm/swap.c          |  2 +-
 4 files changed, 15 insertions(+), 15 deletions(-)
(limited to 'mm/memremap.c')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index deb51f8dc283..9849dfda44d4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1441,17 +1441,17 @@ vm_fault_t finish_fault(struct vm_fault *vmf);
 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
 
-bool __put_devmap_managed_page_refs(struct page *page, int refs);
-static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
+bool __put_devmap_managed_folio_refs(struct folio *folio, int refs);
+static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
 {
 	if (!static_branch_unlikely(&devmap_managed_key))
 		return false;
-	if (!is_zone_device_page(page))
+	if (!folio_is_zone_device(folio))
 		return false;
-	return __put_devmap_managed_page_refs(page, refs);
+	return __put_devmap_managed_folio_refs(folio, refs);
 }
 #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
+static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
 {
 	return false;
 }
@@ -1575,7 +1575,7 @@ static inline void put_page(struct page *page)
 	 * For some devmap managed pages we need to catch refcount transition
 	 * from 2 to 1:
 	 */
-	if (put_devmap_managed_page_refs(&folio->page, 1))
+	if (put_devmap_managed_folio_refs(folio, 1))
 		return;
 	folio_put(folio);
 }
diff --git a/mm/gup.c b/mm/gup.c
index 8dcbeae714e2..44c6ceee7e1f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -89,7 +89,7 @@ retry:
 	 * belongs to this folio.
 	 */
 	if (unlikely(page_folio(page) != folio)) {
-		if (!put_devmap_managed_page_refs(&folio->page, refs))
+		if (!put_devmap_managed_folio_refs(folio, refs))
 			folio_put_refs(folio, refs);
 		goto retry;
 	}
@@ -156,7 +156,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
 	 */
 	if (unlikely((flags & FOLL_LONGTERM) &&
 		     !folio_is_longterm_pinnable(folio))) {
-		if (!put_devmap_managed_page_refs(&folio->page, refs))
+		if (!put_devmap_managed_folio_refs(folio, refs))
 			folio_put_refs(folio, refs);
 		return NULL;
 	}
@@ -198,7 +198,7 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
 		refs *= GUP_PIN_COUNTING_BIAS;
 	}
 
-	if (!put_devmap_managed_page_refs(&folio->page, refs))
+	if (!put_devmap_managed_folio_refs(folio, refs))
 		folio_put_refs(folio, refs);
 }
 
diff --git a/mm/memremap.c b/mm/memremap.c
index e1776693e2ea..40d4547ce514 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -512,9 +512,9 @@ void zone_device_page_init(struct page *page)
 EXPORT_SYMBOL_GPL(zone_device_page_init);
 
 #ifdef CONFIG_FS_DAX
-bool __put_devmap_managed_page_refs(struct page *page, int refs)
+bool __put_devmap_managed_folio_refs(struct folio *folio, int refs)
 {
-	if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
+	if (folio->page.pgmap->type != MEMORY_DEVICE_FS_DAX)
 		return false;
 
 	/*
@@ -522,9 +522,9 @@ bool __put_devmap_managed_page_refs(struct page *page, int refs)
 	 * refcount is 1, then the page is free and the refcount is
 	 * stable because nobody holds a reference on the page.
 	 */
-	if (page_ref_sub_return(page, refs) == 1)
-		wake_up_var(&page->_refcount);
+	if (folio_ref_sub_return(folio, refs) == 1)
+		wake_up_var(&folio->_refcount);
 	return true;
 }
-EXPORT_SYMBOL(__put_devmap_managed_page_refs);
+EXPORT_SYMBOL(__put_devmap_managed_folio_refs);
 #endif /* CONFIG_FS_DAX */
diff --git a/mm/swap.c b/mm/swap.c
index 8ae5cd4ed180..bfce6d53b237 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -980,7 +980,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			unlock_page_lruvec_irqrestore(lruvec, flags);
 			lruvec = NULL;
 		}
-		if (put_devmap_managed_page_refs(&folio->page, nr_refs))
+		if (put_devmap_managed_folio_refs(folio, nr_refs))
 			continue;
 		if (folio_ref_sub_and_test(folio, nr_refs))
 			free_zone_device_folio(folio);
--
cgit v1.2.3-59-g8ed1b
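
Both conversions above follow the same pattern: a helper that used to take a
struct page and reach the head page via compound_head() (directly, or inside
page-flag wrappers such as PageAnon()) now takes a struct folio, which by
definition already refers to a head page. Below is a minimal user-space sketch
of that cost model; struct page, struct folio and the helpers here are
simplified stand-ins for the kernel's real machinery, not the actual
definitions:

	#include <stdio.h>

	/* Simplified stand-ins for the kernel types, for illustration only. */
	struct page {
		struct page *head;	/* tail pages point at their head page */
		unsigned long flags;
	};

	struct folio {
		struct page page;	/* a folio is always a head page */
	};

	#define PG_anon 0x1UL

	/*
	 * A page-based helper cannot trust its argument: it may be handed a
	 * tail page, so every call pays for a head-page lookup first.
	 */
	static struct page *compound_head(struct page *page)
	{
		printf("compound_head()\n");
		return page->head;
	}

	static int PageAnon(struct page *page)
	{
		return (compound_head(page)->flags & PG_anon) != 0;
	}

	/* A folio-based helper already holds the head page: no lookup. */
	static int folio_test_anon(struct folio *folio)
	{
		return (folio->page.flags & PG_anon) != 0;
	}

	int main(void)
	{
		struct folio f = { .page = { .flags = PG_anon } };
		f.page.head = &f.page;

		printf("page API  -> %d\n", PageAnon(&f.page));     /* one lookup */
		printf("folio API -> %d\n", folio_test_anon(&f));   /* no lookup  */
		return 0;
	}

Running the sketch prints one compound_head() trace for the page-based query
and none for the folio-based one; that repeated lookup is the saving the
first commit message refers to, and the same reasoning lets the second commit
swap page_ref_sub_return() for folio_ref_sub_return().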