path: root/fs/btrfs/inode.c
Diffstat (limited to 'fs/btrfs/inode.c')
 fs/btrfs/inode.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index da13bd0d10f1..81737eff92f3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4809,7 +4809,7 @@ again:
 		goto out_unlock;
 
 	if (!PageUptodate(page)) {
-		ret = btrfs_readpage(NULL, page);
+		ret = btrfs_read_folio(NULL, page_folio(page));
 		lock_page(page);
 		if (page->mapping != mapping) {
 			unlock_page(page);
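
Note on the hunk above: this is the caller side of the 5.19 readpage conversion. The address_space operation readpage(file, page) becomes read_folio(file, folio), and call sites that still work with a struct page bridge via page_folio(). A minimal sketch of the relationship, assuming the v5.19 definitions in include/linux/fs.h (the example caller is hypothetical, not code from this commit):

/* Sketch of the v5.19 hook this commit switches to: */
struct address_space_operations {
	int (*read_folio)(struct file *file, struct folio *folio);
	/* pre-5.19: int (*readpage)(struct file *, struct page *); */
	/* ... other operations ... */
};

/* Hypothetical caller that, like the truncate path above, still holds
 * a struct page and converts it at the call site: */
static int read_one_page(struct page *page)
{
	return btrfs_read_folio(NULL, page_folio(page));
}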
@@ -8204,7 +8204,7 @@ static void btrfs_readahead(struct readahead_control *rac)
 }
 
 /*
- * For releasepage() and invalidate_folio() we have a race window where
+ * For release_folio() and invalidate_folio() we have a race window where
  * folio_end_writeback() is called but the subpage spinlock is not yet released.
  * If we continue to release/invalidate the page, we could cause use-after-free
  * for subpage spinlock. So this function is to spin and wait for subpage
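
The comment reworded above documents a drain pattern rather than ordinary locking: acquiring the subpage spinlock cannot succeed until any holder on the folio_end_writeback() side has left its critical section, so an empty lock/unlock pair is enough to make the subsequent release/invalidate safe. A generic sketch of the idea (illustration only, not the btrfs helper itself):

#include <linux/spinlock.h>

/*
 * Spin-and-wait: we never touch the data the lock protects.  Taking
 * the lock simply blocks until every in-flight holder (here, the
 * writeback endio path) has released it; then we drop it at once.
 */
static void drain_spinlock(spinlock_t *lock)
{
	spin_lock_irq(lock);
	spin_unlock_irq(lock);
}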
@@ -8236,22 +8236,22 @@ static void wait_subpage_spinlock(struct page *page)
 	spin_unlock_irq(&subpage->lock);
 }
 
-static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
-	int ret = try_release_extent_mapping(page, gfp_flags);
+	int ret = try_release_extent_mapping(&folio->page, gfp_flags);
 
 	if (ret == 1) {
-		wait_subpage_spinlock(page);
-		clear_page_extent_mapped(page);
+		wait_subpage_spinlock(&folio->page);
+		clear_page_extent_mapped(&folio->page);
 	}
 	return ret;
 }
 
-static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
-	if (PageWriteback(page) || PageDirty(page))
-		return 0;
-	return __btrfs_releasepage(page, gfp_flags);
+	if (folio_test_writeback(folio) || folio_test_dirty(folio))
+		return false;
+	return __btrfs_release_folio(folio, gfp_flags);
 }
 
 #ifdef CONFIG_MIGRATION
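
Two API details in the hunk above are easy to miss: release_folio reports success as bool where releasepage used 1/0, and the Page*() flag tests become folio_test_*() helpers. try_release_extent_mapping() still returns an int of 0 or 1, so "return ret;" from the new bool function relies on the ordinary int-to-bool conversion and preserves the old behaviour. For comparison, a hypothetical minimal release_folio under the v5.19 contract (example names, not btrfs code):

#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Return true only when fs-private state is gone and the folio may be
 * freed; dirty or writeback folios must be refused.
 */
static bool example_release_folio(struct folio *folio, gfp_t gfp)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;
	folio_detach_private(folio);	/* drop private data, if any */
	return true;
}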
@@ -8322,7 +8322,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
 	 * still safe to wait for ordered extent to finish.
 	 */
 	if (!(offset == 0 && length == folio_size(folio))) {
-		btrfs_releasepage(&folio->page, GFP_NOFS);
+		btrfs_release_folio(folio, GFP_NOFS);
 		return;
 	}
 
@@ -8446,7 +8446,7 @@ next:
 	ASSERT(!folio_test_ordered(folio));
 	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
 	if (!inode_evicting)
-		__btrfs_releasepage(&folio->page, GFP_NOFS);
+		__btrfs_release_folio(folio, GFP_NOFS);
 	clear_page_extent_mapped(&folio->page);
 }
@@ -11415,13 +11415,13 @@ static const struct file_operations btrfs_dir_file_operations = {
  * For now we're avoiding this by dropping bmap.
  */
 static const struct address_space_operations btrfs_aops = {
-	.readpage	= btrfs_readpage,
+	.read_folio	= btrfs_read_folio,
 	.writepage	= btrfs_writepage,
 	.writepages	= btrfs_writepages,
 	.readahead	= btrfs_readahead,
 	.direct_IO	= noop_direct_IO,
 	.invalidate_folio = btrfs_invalidate_folio,
-	.releasepage	= btrfs_releasepage,
+	.release_folio	= btrfs_release_folio,
 #ifdef CONFIG_MIGRATION
 	.migratepage	= btrfs_migratepage,
 #endif
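
As a closing illustration (hypothetical filesystem, not part of this commit): after this conversion, an address_space_operations table on v5.19 mixes the new folio-based hooks with operations that had not yet been converted, much like btrfs_aops above:

static const struct address_space_operations example_aops = {
	.read_folio		= example_read_folio,		/* was .readpage */
	.readahead		= example_readahead,
	.writepages		= example_writepages,
	.invalidate_folio	= example_invalidate_folio,
	.release_folio		= example_release_folio,	/* was .releasepage */
};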