author		2025-01-29 14:05:26 +1030
committer	2025-03-18 20:35:42 +0100
commit		6c6201278e65378b53d8b6933a44018653b081e0 (patch)
tree		c7a625b5e7e1ca817cdece6637e2fb95fd14e381
parent		btrfs: simplify subpage handling of btrfs_clear_buffer_dirty() (diff)
btrfs: simplify subpage handling of write_one_eb()
Currently inside write_one_eb() we have two different ways of handling
subpage and regular metadata.
The differences are (see the sketch after this list):

- Extra offset/length calculation when adding the folio range to the
  bio for the subpage case

- Only decrease wbc->nr_to_write if the whole page is no longer dirty,
  for the subpage case

- Use subpage helpers for the subpage case
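For orientation, the removed code below had this shape (an abridged
sketch of the deleted lines in the diff at the end; the comments are
editorial, not from the source):

	if (btrfs_meta_is_subpage(fs_info)) {
		/* Single folio: subpage bitmap helpers, bio range
		 * clamped to eb->start / eb->len within the folio. */
	} else {
		/* Loop over all folios: whole-folio dirty/writeback
		 * flags, bio range is the full folio_size at offset 0. */
	}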
Merge the two ways into a shared one (see the worked example after this
list):

- Always calculate the to-be-queued range

  So that bio_add_folio() can use the same calculated length and offset
  for both cases.

- Use the btrfs_meta_folio_clear_dirty() and
  btrfs_meta_folio_set_writeback() helpers

  These cover both cases.

- Only decrease wbc->nr_to_write if the folio is no longer dirty

  Since we have the folio locked, no one else can modify the folio's
  dirty flags (set_extent_buffer_dirty() will also lock the folio for
  subpage cases).

  Thus, after our btrfs_meta_folio_clear_dirty() call, if the whole
  folio is no longer dirty, we are submitting the last dirty eb of the
  folio and can decrease wbc->nr_to_write properly.
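As a sanity check of the unified range calculation, here is a minimal
userspace sketch (hypothetical geometries; max_u64()/min_u64() and
queue_range() are stand-ins for the kernel's max_t()/min_t() and the
loop body, not real helpers) showing that the clamped range reduces to
the expected length and offset in both the subpage and regular cases:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's max_t()/min_t() clamps. */
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* Mirrors the range math of the unified loop in write_one_eb(). */
static void queue_range(uint64_t eb_start, uint64_t eb_len,
			uint64_t folio_pos, uint64_t folio_size)
{
	uint64_t range_start = max_u64(eb_start, folio_pos);
	uint64_t range_len = min_u64(folio_pos + folio_size,
				     eb_start + eb_len) - range_start;

	printf("folio @%6llu: add %5llu bytes at offset_in_folio %5llu\n",
	       (unsigned long long)folio_pos,
	       (unsigned long long)range_len,
	       (unsigned long long)(range_start - folio_pos));
}

int main(void)
{
	/* Subpage-like case: a 16K eb at 16K inside one 64K folio ->
	 * adds 16384 bytes at offset 16384, i.e. eb->len bytes at
	 * eb->start - folio_pos(folio), as the old branch did. */
	queue_range(16384, 16384, 0, 65536);

	/* Regular case: a 16K eb spanning four 4K folios -> each folio
	 * adds its full 4096 bytes at offset 0, as the old loop did. */
	for (int i = 0; i < 4; i++)
		queue_range(16384, 16384, 16384 + i * 4096ULL, 4096);
	return 0;
}

The diff also switches from bio_add_folio() plus ASSERT() to
bio_add_folio_nofail(), which keeps the same expectation that adding
the range cannot fail, just without the open-coded assertion.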
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
-rw-r--r--	fs/btrfs/extent_io.c	42
1 file changed, 13 insertions(+), 29 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 354dd2522531..b23d27cfdf14 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1786,6 +1786,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
 	struct btrfs_bio *bbio;
+	const int num_folios = num_extent_folios(eb);
 
 	prepare_eb_write(eb);
 
@@ -1797,38 +1798,21 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
 	wbc_init_bio(wbc, &bbio->bio);
 	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
 	bbio->file_offset = eb->start;
-	if (btrfs_meta_is_subpage(fs_info)) {
-		struct folio *folio = eb->folios[0];
-		bool ret;
+	for (int i = 0; i < num_folios; i++) {
+		struct folio *folio = eb->folios[i];
+		u64 range_start = max_t(u64, eb->start, folio_pos(folio));
+		u32 range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
+				      eb->start + eb->len) - range_start;
 
 		folio_lock(folio);
-		btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
-		if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
-						       eb->len)) {
-			folio_clear_dirty_for_io(folio);
-			wbc->nr_to_write--;
-		}
-		ret = bio_add_folio(&bbio->bio, folio, eb->len,
-				    eb->start - folio_pos(folio));
-		ASSERT(ret);
-		wbc_account_cgroup_owner(wbc, folio, eb->len);
-		folio_unlock(folio);
-	} else {
-		int num_folios = num_extent_folios(eb);
-
-		for (int i = 0; i < num_folios; i++) {
-			struct folio *folio = eb->folios[i];
-			bool ret;
-
-			folio_lock(folio);
-			folio_clear_dirty_for_io(folio);
-			folio_start_writeback(folio);
-			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
-			ASSERT(ret);
-			wbc_account_cgroup_owner(wbc, folio, eb->folio_size);
+		btrfs_meta_folio_clear_dirty(fs_info, folio, eb->start, eb->len);
+		btrfs_meta_folio_set_writeback(fs_info, folio, eb->start, eb->len);
+		if (!folio_test_dirty(folio))
 			wbc->nr_to_write -= folio_nr_pages(folio);
-			folio_unlock(folio);
-		}
+		bio_add_folio_nofail(&bbio->bio, folio, range_len,
+				     offset_in_folio(folio, range_start));
+		wbc_account_cgroup_owner(wbc, folio, range_len);
+		folio_unlock(folio);
 	}
 	btrfs_submit_bbio(bbio, 0);
 }