author	Matthew Wilcox (Oracle) <willy@infradead.org>	2021-12-22 23:43:16 -0500
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-03-21 12:56:36 -0400
commit	12521c7606b2037f8ac2a2fab19e955444a549cf (patch)
tree	6592d29de77422059b71a41043c1a482885b32e7	/mm/gup.c
parent	mm/gup: Convert gup_huge_pgd() to use a folio (diff)
mm/gup: Turn compound_next() into gup_folio_next()
Convert both callers to work on folios instead of pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
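For context, the grouping idiom the renamed helper relies on is visible in the first hunk of the diff below: instead of comparing compound_head() of successive pages, it compares page_folio() results, so each run of pages belonging to one folio is counted once and released with a single gup_put_folio() call. The following is a minimal userspace sketch of that pattern, not kernel code; folio_of() and the integer page ids are stand-ins for page_folio() and real struct page / struct folio pointers.

/* Sketch of the run-length grouping used by gup_folio_next():
 * find how many consecutive array entries map to the same "folio".
 * folio_of() is a stand-in for the kernel's page_folio(). */
#include <stdio.h>

static int folio_of(int page)
{
	return page / 4;		/* pretend each folio spans 4 pages */
}

static int group_next(const int *list, unsigned long npages,
		      unsigned long i, unsigned int *ntails)
{
	int folio = folio_of(list[i]);
	unsigned long nr;

	for (nr = i + 1; nr < npages; nr++)
		if (folio_of(list[nr]) != folio)
			break;

	*ntails = nr - i;		/* pages in this run, including list[i] */
	return folio;
}

int main(void)
{
	int pages[] = { 0, 1, 2, 5, 6, 9 };
	unsigned long npages = sizeof(pages) / sizeof(pages[0]);
	unsigned int nr;

	/* Same loop shape as the converted callers: i advances by one
	 * whole run per iteration, not by one page. */
	for (unsigned long i = 0; i < npages; i += nr) {
		int folio = group_next(pages, npages, i, &nr);
		printf("folio %d: %u page(s)\n", folio, nr);
	}
	return 0;
}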
Diffstat (limited to 'mm/gup.c')
-rw-r--r--	mm/gup.c	40
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 23b83bc16e45..0bde28f0543f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -230,20 +230,19 @@ static inline struct page *compound_range_next(struct page *start,
return page;
}
-static inline struct page *compound_next(struct page **list,
+static inline struct folio *gup_folio_next(struct page **list,
unsigned long npages, unsigned long i, unsigned int *ntails)
{
- struct page *page;
+ struct folio *folio = page_folio(list[i]);
unsigned int nr;
- page = compound_head(list[i]);
for (nr = i + 1; nr < npages; nr++) {
- if (compound_head(list[nr]) != page)
+ if (page_folio(list[nr]) != folio)
break;
}
*ntails = nr - i;
- return page;
+ return folio;
}
/**
@@ -271,17 +270,17 @@ static inline struct page *compound_next(struct page **list,
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty)
{
- unsigned long index;
- struct page *head;
- unsigned int ntails;
+ unsigned long i;
+ struct folio *folio;
+ unsigned int nr;
if (!make_dirty) {
unpin_user_pages(pages, npages);
return;
}
- for (index = 0; index < npages; index += ntails) {
- head = compound_next(pages, npages, index, &ntails);
+ for (i = 0; i < npages; i += nr) {
+ folio = gup_folio_next(pages, npages, i, &nr);
/*
* Checking PageDirty at this point may race with
* clear_page_dirty_for_io(), but that's OK. Two key
@@ -302,9 +301,12 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
* written back, so it gets written back again in the
* next writeback cycle. This is harmless.
*/
- if (!PageDirty(head))
- set_page_dirty_lock(head);
- put_compound_head(head, ntails, FOLL_PIN);
+ if (!folio_test_dirty(folio)) {
+ folio_lock(folio);
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
+ }
+ gup_put_folio(folio, nr, FOLL_PIN);
}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
@@ -357,9 +359,9 @@ EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
*/
void unpin_user_pages(struct page **pages, unsigned long npages)
{
- unsigned long index;
- struct page *head;
- unsigned int ntails;
+ unsigned long i;
+ struct folio *folio;
+ unsigned int nr;
/*
* If this WARN_ON() fires, then the system *might* be leaking pages (by
@@ -369,9 +371,9 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
if (WARN_ON(IS_ERR_VALUE(npages)))
return;
- for (index = 0; index < npages; index += ntails) {
- head = compound_next(pages, npages, index, &ntails);
- put_compound_head(head, ntails, FOLL_PIN);
+ for (i = 0; i < npages; i += nr) {
+ folio = gup_folio_next(pages, npages, i, &nr);
+ gup_put_folio(folio, nr, FOLL_PIN);
}
}
EXPORT_SYMBOL(unpin_user_pages);
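A note on the dirty path in unpin_user_pages_dirty_lock(), as the diff above shows: where the old code called set_page_dirty_lock() on the head page, the converted code takes the folio lock explicitly and marks the folio dirty under it (folio_lock()/folio_mark_dirty()/folio_unlock()), and the preceding folio_test_dirty() check preserves the existing behaviour of skipping folios that are already dirty. With that change both unpin loops operate purely in terms of folios, advancing by one folio-sized run of pages per iteration.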