From 9cf35f673583ccc9f3e2507498b3079d56614ad3 Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Wed, 11 Sep 2019 11:45:15 -0500 Subject: btrfs: simplify inode locking for RWF_NOWAIT This is similar to 942491c9e6d6 ("xfs: fix AIM7 regression"). Apparently our current rwsem code doesn't like doing the trylock, then lock-for-real scheme. This causes extra contention on the lock and can be measured e.g. by the AIM7 benchmark. So change our read/write methods to just do the trylock for the RWF_NOWAIT case. Fixes: edf064e7c6fe ("btrfs: nowait aio support") Signed-off-by: Goldwyn Rodrigues Reviewed-by: David Sterba [ update changelog ] Signed-off-by: David Sterba --- fs/btrfs/file.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 435a502a3226..25ce1b6dbda9 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1903,9 +1903,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, (iocb->ki_flags & IOCB_NOWAIT)) return -EOPNOTSUPP; - if (!inode_trylock(inode)) { - if (iocb->ki_flags & IOCB_NOWAIT) + if (iocb->ki_flags & IOCB_NOWAIT) { + if (!inode_trylock(inode)) return -EAGAIN; + } else { inode_lock(inode); } -- cgit v1.2.3-59-g8ed1b From 35b814f3c53e0635cbb1de3ea5d58776eafe8e20 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 12 Sep 2019 18:31:44 +0300 Subject: btrfs: Add assert to catch nested transaction commit A recent patch to btrfs showed that there was at least one case where a nested transaction was committed. A nested transaction in this case means that code which already holds a transaction handle calls some function which in turn obtains a copy of the same transaction handle. In such cases the correct thing to do is for the lower callee to call btrfs_end_transaction, which contains the appropriate checks so as not to commit the transaction, since committing there would leave the caller with a stale transaction handle. To catch such cases, add an assert in btrfs_commit_transaction ensuring btrfs_trans_handle::use_count is always 1.
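As an aside for readers of this series (not part of the kernel change itself), the rule the new assert enforces can be modelled in a few lines of plain userspace C; the names below are simplified stand-ins for btrfs_join_transaction(), btrfs_end_transaction() and btrfs_commit_transaction(), not the real API:

    #include <assert.h>
    #include <stdio.h>

    /* Toy model of a shared transaction handle (illustrative only). */
    struct trans_handle {
            int use_count;  /* how many nested users share the handle */
    };

    static struct trans_handle running;

    /* Stand-in for btrfs_join_transaction(): reuse the running handle. */
    static struct trans_handle *join_transaction(void)
    {
            running.use_count++;
            return &running;
    }

    /* Stand-in for btrfs_end_transaction(): nested users just drop their ref. */
    static void end_transaction(struct trans_handle *trans)
    {
            if (--trans->use_count > 0)
                    return;  /* nested user, nothing more to do */
            /* outermost user: the transaction could be finalized here */
    }

    /* Stand-in for btrfs_commit_transaction(): must never be called nested. */
    static void commit_transaction(struct trans_handle *trans)
    {
            assert(trans->use_count == 1);  /* the new ASSERT catches nesting */
            trans->use_count = 0;
    }

    int main(void)
    {
            struct trans_handle *outer = join_transaction();
            struct trans_handle *inner = join_transaction();  /* nested user */

            end_transaction(inner);      /* correct: the callee ends, never commits */
            commit_transaction(outer);   /* only the outermost owner commits */
            printf("committed with use_count == 1\n");
            return 0;
    }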
Reviewed-by: Josef Bacik Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/transaction.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 8624bdee8c5b..8b75426c349e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1949,6 +1949,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) struct btrfs_transaction *prev_trans = NULL; int ret; + ASSERT(refcount_read(&trans->use_count) == 1); + /* Stop the commit early if ->aborted is set */ if (unlikely(READ_ONCE(cur_trans->aborted))) { ret = cur_trans->aborted; -- cgit v1.2.3-59-g8ed1b From b9fae2ebee0cc3a16e1b61fa397099886886e906 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 11 Sep 2019 17:42:38 +0100 Subject: Btrfs: make btrfs_wait_extents() static It's not used outside of transaction.c. Reviewed-by: Josef Bacik Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/transaction.c | 2 +- fs/btrfs/transaction.h | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 8b75426c349e..92ffe1df3fab 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -990,7 +990,7 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info, return werr; } -int btrfs_wait_extents(struct btrfs_fs_info *fs_info, +static int btrfs_wait_extents(struct btrfs_fs_info *fs_info, struct extent_io_tree *dirty_pages) { bool errors = false; diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 2c5a6f6e5bb0..c51135d9b448 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -218,8 +218,6 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info, struct extent_io_tree *dirty_pages, int mark); -int btrfs_wait_extents(struct btrfs_fs_info *fs_info, - struct extent_io_tree *dirty_pages); int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark); int btrfs_transaction_blocked(struct btrfs_fs_info *info); int btrfs_transaction_in_commit(struct btrfs_fs_info *info); -- cgit v1.2.3-59-g8ed1b From c18679ebd86857d4dae5a7462f38c4c98d8445de Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Mon, 26 Aug 2019 15:40:38 +0800 Subject: btrfs: tree-checker: Try to detect missing INODE_ITEM For the following items, key->objectid is the inode number: - DIR_ITEM - DIR_INDEX - XATTR_ITEM - EXTENT_DATA - INODE_REF So in the subvolume tree, such items must have their previous item share the same objectid, e.g.: (257 INODE_ITEM 0) (257 DIR_INDEX xxx) (257 DIR_ITEM xxx) (258 INODE_ITEM 0) (258 INODE_REF 0) (258 XATTR_ITEM 0) (258 EXTENT_DATA 0) But if we have the following sequence, then there is definitely something wrong, normally an INODE_ITEM is missing, like: (257 INODE_ITEM 0) (257 DIR_INDEX xxx) (257 DIR_ITEM xxx) (258 XATTR_ITEM 0) <<< objectid suddenly changed to 258 (258 EXTENT_DATA 0) So just by checking the previous key for the above inode-based key types, we can detect a missing inode item. For the INODE_REF key type, the check will be added along with the INODE_REF checker.
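To make the rule concrete (purely illustrative, not kernel code), here is a minimal userspace model of the previous-key check; the struct and helper names are invented stand-ins, and the slot > 0 and is_fstree() preconditions of the real checker are omitted:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for struct btrfs_key; only the fields needed here. */
    struct key {
            uint64_t objectid;      /* the inode number for inode-based item types */
            const char *type;       /* printable stand-in for the item type */
    };

    /*
     * Model of the new tree-checker rule: for DIR_ITEM/DIR_INDEX/XATTR_ITEM/
     * EXTENT_DATA (and later INODE_REF), an item must share its objectid with
     * the previous item, otherwise the INODE_ITEM that should sit in front of
     * it is missing.
     */
    static bool prev_key_ok(const struct key *prev, const struct key *key)
    {
            return prev->objectid == key->objectid;
    }

    int main(void)
    {
            struct key prev = { 257, "DIR_ITEM" };
            struct key cur  = { 258, "XATTR_ITEM" };

            if (!prev_key_ok(&prev, &cur))
                    printf("corruption: %s jumps from objectid %llu to %llu, INODE_ITEM missing\n",
                           cur.type, (unsigned long long)prev.objectid,
                           (unsigned long long)cur.objectid);
            return 0;
    }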
Reviewed-by: Nikolay Borisov Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 076d5b8014fb..5aca82e67309 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -141,6 +141,20 @@ static int check_extent_data_item(struct extent_buffer *leaf, return -EUCLEAN; } + /* + * Previous key must have the same key->objectid (ino). + * It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA. + * But if objectids mismatch, it means we have a missing + * INODE_ITEM. + */ + if (slot > 0 && is_fstree(btrfs_header_owner(leaf)) && + prev_key->objectid != key->objectid) { + file_extent_err(leaf, slot, + "invalid previous key objectid, have %llu expect %llu", + prev_key->objectid, key->objectid); + return -EUCLEAN; + } + fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) { @@ -299,13 +313,22 @@ static void dir_item_err(const struct extent_buffer *eb, int slot, } static int check_dir_item(struct extent_buffer *leaf, - struct btrfs_key *key, int slot) + struct btrfs_key *key, struct btrfs_key *prev_key, + int slot) { struct btrfs_fs_info *fs_info = leaf->fs_info; struct btrfs_dir_item *di; u32 item_size = btrfs_item_size_nr(leaf, slot); u32 cur = 0; + /* Same check as in check_extent_data_item() */ + if (slot > 0 && is_fstree(btrfs_header_owner(leaf)) && + prev_key->objectid != key->objectid) { + dir_item_err(leaf, slot, + "invalid previous key objectid, have %llu expect %llu", + prev_key->objectid, key->objectid); + return -EUCLEAN; + } di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); while (cur < item_size) { u32 name_len; @@ -1244,7 +1267,7 @@ static int check_leaf_item(struct extent_buffer *leaf, case BTRFS_DIR_ITEM_KEY: case BTRFS_DIR_INDEX_KEY: case BTRFS_XATTR_ITEM_KEY: - ret = check_dir_item(leaf, key, slot); + ret = check_dir_item(leaf, key, prev_key, slot); break; case BTRFS_BLOCK_GROUP_ITEM_KEY: ret = check_block_group_item(leaf, key, slot); -- cgit v1.2.3-59-g8ed1b From 71bf92a9b8777635fede96eb4ab3b5b7bf310159 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Mon, 26 Aug 2019 15:40:39 +0800 Subject: btrfs: tree-checker: Add check for INODE_REF For INODE_REF we will check: - Objectid (ino) against previous key To detect missing INODE_ITEM. - No overflow/padding in the data payload Much like DIR_ITEM, but with less members to check. Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 5aca82e67309..f041ec5f415c 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -1247,6 +1247,56 @@ static int check_extent_data_ref(struct extent_buffer *leaf, return 0; } +#define inode_ref_err(fs_info, eb, slot, fmt, args...) 
\ + inode_item_err(fs_info, eb, slot, fmt, ##args) +static int check_inode_ref(struct extent_buffer *leaf, + struct btrfs_key *key, struct btrfs_key *prev_key, + int slot) +{ + struct btrfs_inode_ref *iref; + unsigned long ptr; + unsigned long end; + + /* namelen can't be 0, so item_size == sizeof() is also invalid */ + if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) { + inode_ref_err(fs_info, leaf, slot, + "invalid item size, have %u expect (%zu, %u)", + btrfs_item_size_nr(leaf, slot), + sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info)); + return -EUCLEAN; + } + + ptr = btrfs_item_ptr_offset(leaf, slot); + end = ptr + btrfs_item_size_nr(leaf, slot); + while (ptr < end) { + u16 namelen; + + if (ptr + sizeof(iref) > end) { + inode_ref_err(fs_info, leaf, slot, + "inode ref overflow, ptr %lu end %lu inode_ref_size %zu", + ptr, end, sizeof(iref)); + return -EUCLEAN; + } + + iref = (struct btrfs_inode_ref *)ptr; + namelen = btrfs_inode_ref_name_len(leaf, iref); + if (ptr + sizeof(*iref) + namelen > end) { + inode_ref_err(fs_info, leaf, slot, + "inode ref overflow, ptr %lu end %lu namelen %u", + ptr, end, namelen); + return -EUCLEAN; + } + + /* + * NOTE: In theory we should record all found index numbers + * to find any duplicated indexes, but that will be too time + * consuming for inodes with too many hard links. + */ + ptr += sizeof(*iref) + namelen; + } + return 0; +} + /* * Common point to switch the item-specific validation. */ @@ -1269,6 +1319,9 @@ static int check_leaf_item(struct extent_buffer *leaf, case BTRFS_XATTR_ITEM_KEY: ret = check_dir_item(leaf, key, prev_key, slot); break; + case BTRFS_INODE_REF_KEY: + ret = check_inode_ref(leaf, key, prev_key, slot); + break; case BTRFS_BLOCK_GROUP_ITEM_KEY: ret = check_block_group_item(leaf, key, slot); break; -- cgit v1.2.3-59-g8ed1b From f624d976081d371b3dea65935501d8d9f142d5df Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 10 Sep 2019 15:40:17 +0800 Subject: btrfs: ctree: Reduce one indent level for btrfs_search_slot() In btrfs_search_slot(), we have something like: if (level != 0) { /* Do search inside tree nodes */ } else { /* Do search inside tree leaves */ goto done; } This causes an extra indent level for the tree node search code. Change it to something like: if (level == 0) { /* Do search inside tree leaves */ goto done; } /* Do search inside tree nodes */ So we have more space to maneuver our code; this is especially useful as the tree node search code is more complex than the leaf search code.
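The control-flow change can be sketched in a few lines of standalone C (a schematic only; the function name and loop body are invented and bear no relation to the real search logic):

    #include <stdio.h>

    /* Hypothetical miniature of the restructured loop shape in btrfs_search_slot(). */
    static int search(int levels)
    {
            int level = levels;

            while (level >= 0) {
                    if (level == 0) {
                            /* leaf: handle the simple case and leave early */
                            printf("searching leaf\n");
                            goto done;
                    }

                    /* node: the more involved code now sits one indent level lower */
                    printf("searching node at level %d\n", level);
                    level--;
            }
    done:
            return 0;
    }

    int main(void)
    {
            return search(3);
    }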
Reviewed-by: Anand Jain Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 131 +++++++++++++++++++++++++++---------------------------- 1 file changed, 64 insertions(+), 67 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index e59cde204b2f..dea29cd1f2e8 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2785,6 +2785,8 @@ again: } while (b) { + int dec = 0; + level = btrfs_header_level(b); /* @@ -2861,73 +2863,7 @@ cow_done: if (ret < 0) goto done; - if (level != 0) { - int dec = 0; - if (ret && slot > 0) { - dec = 1; - slot -= 1; - } - p->slots[level] = slot; - err = setup_nodes_for_search(trans, root, p, b, level, - ins_len, &write_lock_level); - if (err == -EAGAIN) - goto again; - if (err) { - ret = err; - goto done; - } - b = p->nodes[level]; - slot = p->slots[level]; - - /* - * slot 0 is special, if we change the key - * we have to update the parent pointer - * which means we must have a write lock - * on the parent - */ - if (slot == 0 && ins_len && - write_lock_level < level + 1) { - write_lock_level = level + 1; - btrfs_release_path(p); - goto again; - } - - unlock_up(p, level, lowest_unlock, - min_write_lock_level, &write_lock_level); - - if (level == lowest_level) { - if (dec) - p->slots[level]++; - goto done; - } - - err = read_block_for_search(root, p, &b, level, - slot, key); - if (err == -EAGAIN) - goto again; - if (err) { - ret = err; - goto done; - } - - if (!p->skip_locking) { - level = btrfs_header_level(b); - if (level <= write_lock_level) { - if (!btrfs_try_tree_write_lock(b)) { - btrfs_set_path_blocking(p); - btrfs_tree_lock(b); - } - p->locks[level] = BTRFS_WRITE_LOCK; - } else { - if (!btrfs_tree_read_lock_atomic(b)) { - btrfs_set_path_blocking(p); - btrfs_tree_read_lock(b); - } - p->locks[level] = BTRFS_READ_LOCK; - } - p->nodes[level] = b; - } - } else { + if (level == 0) { p->slots[level] = slot; if (ins_len > 0 && btrfs_leaf_free_space(b) < ins_len) { @@ -2952,6 +2888,67 @@ cow_done: min_write_lock_level, NULL); goto done; } + if (ret && slot > 0) { + dec = 1; + slot--; + } + p->slots[level] = slot; + err = setup_nodes_for_search(trans, root, p, b, level, ins_len, + &write_lock_level); + if (err == -EAGAIN) + goto again; + if (err) { + ret = err; + goto done; + } + b = p->nodes[level]; + slot = p->slots[level]; + + /* + * Slot 0 is special, if we change the key we have to update + * the parent pointer which means we must have a write lock on + * the parent + */ + if (slot == 0 && ins_len && write_lock_level < level + 1) { + write_lock_level = level + 1; + btrfs_release_path(p); + goto again; + } + + unlock_up(p, level, lowest_unlock, min_write_lock_level, + &write_lock_level); + + if (level == lowest_level) { + if (dec) + p->slots[level]++; + goto done; + } + + err = read_block_for_search(root, p, &b, level, slot, key); + if (err == -EAGAIN) + goto again; + if (err) { + ret = err; + goto done; + } + + if (!p->skip_locking) { + level = btrfs_header_level(b); + if (level <= write_lock_level) { + if (!btrfs_try_tree_write_lock(b)) { + btrfs_set_path_blocking(p); + btrfs_tree_lock(b); + } + p->locks[level] = BTRFS_WRITE_LOCK; + } else { + if (!btrfs_tree_read_lock_atomic(b)) { + btrfs_set_path_blocking(p); + btrfs_tree_read_lock(b); + } + p->locks[level] = BTRFS_READ_LOCK; + } + p->nodes[level] = b; + } } ret = 1; done: -- cgit v1.2.3-59-g8ed1b From abe9339d69bd096e2020ffc39fedee2ac0a9c03b Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 10 Sep 2019 15:40:18 +0800 Subject: btrfs: ctree: Reduce 
one indent level for btrfs_search_old_slot() Similar to btrfs_search_slot() done in previous patch, make a shortcut for the level 0 case and allow to reduce indentation for the remaining case. Reviewed-by: Anand Jain Signed-off-by: Qu Wenruo Reviewed-by: David Sterba [ update changelog ] Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 68 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index dea29cd1f2e8..42ca326a0e6b 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -3005,6 +3005,8 @@ again: p->locks[level] = BTRFS_READ_LOCK; while (b) { + int dec = 0; + level = btrfs_header_level(b); p->nodes[level] = b; @@ -3025,47 +3027,45 @@ again: if (ret < 0) goto done; - if (level != 0) { - int dec = 0; - if (ret && slot > 0) { - dec = 1; - slot -= 1; - } + if (level == 0) { p->slots[level] = slot; unlock_up(p, level, lowest_unlock, 0, NULL); + goto done; + } - if (level == lowest_level) { - if (dec) - p->slots[level]++; - goto done; - } + if (ret && slot > 0) { + dec = 1; + slot--; + } + p->slots[level] = slot; + unlock_up(p, level, lowest_unlock, 0, NULL); - err = read_block_for_search(root, p, &b, level, - slot, key); - if (err == -EAGAIN) - goto again; - if (err) { - ret = err; - goto done; - } + if (level == lowest_level) { + if (dec) + p->slots[level]++; + goto done; + } - level = btrfs_header_level(b); - if (!btrfs_tree_read_lock_atomic(b)) { - btrfs_set_path_blocking(p); - btrfs_tree_read_lock(b); - } - b = tree_mod_log_rewind(fs_info, p, b, time_seq); - if (!b) { - ret = -ENOMEM; - goto done; - } - p->locks[level] = BTRFS_READ_LOCK; - p->nodes[level] = b; - } else { - p->slots[level] = slot; - unlock_up(p, level, lowest_unlock, 0, NULL); + err = read_block_for_search(root, p, &b, level, slot, key); + if (err == -EAGAIN) + goto again; + if (err) { + ret = err; goto done; } + + level = btrfs_header_level(b); + if (!btrfs_tree_read_lock_atomic(b)) { + btrfs_set_path_blocking(p); + btrfs_tree_read_lock(b); + } + b = tree_mod_log_rewind(fs_info, p, b, time_seq); + if (!b) { + ret = -ENOMEM; + goto done; + } + p->locks[level] = BTRFS_READ_LOCK; + p->nodes[level] = b; } ret = 1; done: -- cgit v1.2.3-59-g8ed1b From 34ffafdba12eee3e387694edd128e954fd355e8e Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 10 Sep 2019 15:40:19 +0800 Subject: btrfs: ctree: Remove stray comment of setting up path lock The following comment shows up in btrfs_search_slot() with out much sense: /* * setup the path here so we can release it under lock * contention with the cow code */ if (cow) { /* code touching path->lock[] is far away from here */ } This comment hasn't been cleaned up after the relevant code has been removed. The original code is introduced in commit 65b51a009e29 ("btrfs_search_slot: reduce lock contention by cowing in two stages"): + + /* + * setup the path here so we can release it under lock + * contention with the cow code + */ + p->nodes[level] = b; + if (!p->skip_locking) + p->locks[level] = 1; + But in current code, we have different timing for modifying path lock, so just remove the comment. 
Reviewed-by: Nikolay Borisov Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 42ca326a0e6b..0231141de289 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2789,10 +2789,6 @@ again: level = btrfs_header_level(b); - /* - * setup the path here so we can release it under lock - * contention with the cow code - */ if (cow) { bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); -- cgit v1.2.3-59-g8ed1b From 33ca832fefa5ebf66df983783522c888b851fbe4 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 23 Sep 2019 10:05:17 -0400 Subject: btrfs: separate out the extent leak code We check both extent buffer and extent state leaks in the same function, separate these two functions out so we can move them around. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index cceaf05aada2..1a17218d756d 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -59,12 +59,23 @@ void btrfs_leak_debug_del(struct list_head *entry) spin_unlock_irqrestore(&leak_lock, flags); } -static inline -void btrfs_leak_debug_check(void) +static inline void btrfs_extent_buffer_leak_debug_check(void) { - struct extent_state *state; struct extent_buffer *eb; + while (!list_empty(&buffers)) { + eb = list_entry(buffers.next, struct extent_buffer, leak_list); + pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n", + eb->start, eb->len, atomic_read(&eb->refs), eb->bflags); + list_del(&eb->leak_list); + kmem_cache_free(extent_buffer_cache, eb); + } +} + +static inline void btrfs_extent_state_leak_debug_check(void) +{ + struct extent_state *state; + while (!list_empty(&states)) { state = list_entry(states.next, struct extent_state, leak_list); pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", @@ -74,14 +85,6 @@ void btrfs_leak_debug_check(void) list_del(&state->leak_list); kmem_cache_free(extent_state_cache, state); } - - while (!list_empty(&buffers)) { - eb = list_entry(buffers.next, struct extent_buffer, leak_list); - pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n", - eb->start, eb->len, atomic_read(&eb->refs), eb->bflags); - list_del(&eb->leak_list); - kmem_cache_free(extent_buffer_cache, eb); - } } #define btrfs_debug_check_extent_io_range(tree, start, end) \ @@ -105,7 +108,8 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller, #else #define btrfs_leak_debug_add(new, head) do {} while (0) #define btrfs_leak_debug_del(entry) do {} while (0) -#define btrfs_leak_debug_check() do {} while (0) +#define btrfs_extent_buffer_leak_debug_check() do {} while (0) +#define btrfs_extent_state_leak_debug_check() do {} while (0) #define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0) #endif @@ -235,7 +239,8 @@ free_state_cache: void __cold extent_io_exit(void) { - btrfs_leak_debug_check(); + btrfs_extent_buffer_leak_debug_check(); + btrfs_extent_state_leak_debug_check(); /* * Make sure all delayed rcu free are flushed before we -- cgit v1.2.3-59-g8ed1b From 6f0d04f8e72e1c7fd17e7fac0fea82553a6443b4 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 23 Sep 2019 10:05:18 -0400 Subject: btrfs: separate out the extent io init function We are moving extent_io_tree into it's on file, so separate out 
the extent_state init stuff from extent_io_tree_init(). Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 18 +++++++++++------- fs/btrfs/extent_io.h | 2 ++ fs/btrfs/super.c | 9 ++++++++- 3 files changed, 21 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1a17218d756d..dffa7c6373e1 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -200,19 +200,23 @@ static int __must_check flush_write_bio(struct extent_page_data *epd) return ret; } -int __init extent_io_init(void) +int __init extent_state_cache_init(void) { extent_state_cache = kmem_cache_create("btrfs_extent_state", sizeof(struct extent_state), 0, SLAB_MEM_SPREAD, NULL); if (!extent_state_cache) return -ENOMEM; + return 0; +} +int __init extent_io_init(void) +{ extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer", sizeof(struct extent_buffer), 0, SLAB_MEM_SPREAD, NULL); if (!extent_buffer_cache) - goto free_state_cache; + return -ENOMEM; if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE, offsetof(struct btrfs_io_bio, bio), @@ -230,24 +234,24 @@ free_bioset: free_buffer_cache: kmem_cache_destroy(extent_buffer_cache); extent_buffer_cache = NULL; + return -ENOMEM; +} -free_state_cache: +void __cold extent_state_cache_exit(void) +{ + btrfs_extent_state_leak_debug_check(); kmem_cache_destroy(extent_state_cache); - extent_state_cache = NULL; - return -ENOMEM; } void __cold extent_io_exit(void) { btrfs_extent_buffer_leak_debug_check(); - btrfs_extent_state_leak_debug_check(); /* * Make sure all delayed rcu free are flushed before we * destroy caches. */ rcu_barrier(); - kmem_cache_destroy(extent_state_cache); kmem_cache_destroy(extent_buffer_cache); bioset_exit(&btrfs_bioset); } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index cf3424d58fec..e813f593202d 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -556,4 +556,6 @@ bool find_lock_delalloc_range(struct inode *inode, struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, u64 start); +int __init extent_state_cache_init(void); +void __cold extent_state_cache_exit(void); #endif diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 1b151af25772..843015b9a11e 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2360,10 +2360,14 @@ static int __init init_btrfs_fs(void) if (err) goto free_cachep; - err = extent_map_init(); + err = extent_state_cache_init(); if (err) goto free_extent_io; + err = extent_map_init(); + if (err) + goto free_extent_state_cache; + err = ordered_data_init(); if (err) goto free_extent_map; @@ -2422,6 +2426,8 @@ free_ordered_data: ordered_data_exit(); free_extent_map: extent_map_exit(); +free_extent_state_cache: + extent_state_cache_exit(); free_extent_io: extent_io_exit(); free_cachep: @@ -2442,6 +2448,7 @@ static void __exit exit_btrfs_fs(void) btrfs_prelim_ref_exit(); ordered_data_exit(); extent_map_exit(); + extent_state_cache_exit(); extent_io_exit(); btrfs_interface_exit(); btrfs_end_io_wq_exit(); -- cgit v1.2.3-59-g8ed1b From 9c7d3a548331e72ba3613eaa5c8a74839462b764 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 23 Sep 2019 10:05:19 -0400 Subject: btrfs: move extent_io_tree defs to their own header extent_io.c/h are huge, encompassing a bunch of different things. The extent_io_tree code can live on its own, so separate this out. 
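The underlying header-splitting technique is the usual one: code that only passes pointers around needs just a forward declaration, while the full definition moves to the new header. A tiny standalone C sketch of that idea (the names are generic placeholders, not the btrfs types):

    #include <stdio.h>

    /*
     * Consumer side (what extent_io.h keeps): callers that only pass pointers
     * around need nothing more than a forward declaration.
     */
    struct io_tree;                         /* forward declaration */

    static void note_tree(struct io_tree *tree)
    {
            printf("got a tree pointer: %p\n", (void *)tree);
    }

    /*
     * Provider side (what moves into the new header): the full definition,
     * visible only to code that actually touches the members.
     */
    struct io_tree {
            int owner;
    };

    int main(void)
    {
            struct io_tree tree = { .owner = 1 };

            note_tree(&tree);
            return 0;
    }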
Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/extent-io-tree.h | 227 ++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/extent_io.c | 1 + fs/btrfs/extent_io.h | 217 +------------------------------------------- 4 files changed, 230 insertions(+), 216 deletions(-) create mode 100644 fs/btrfs/extent-io-tree.h diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index fe2b8765d9e6..9939b552089b 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -28,6 +28,7 @@ #include #include #include +#include "extent-io-tree.h" #include "extent_io.h" #include "extent_map.h" #include "async-thread.h" diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h new file mode 100644 index 000000000000..83f0f2438214 --- /dev/null +++ b/fs/btrfs/extent-io-tree.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef BTRFS_EXTENT_IO_TREE_H +#define BTRFS_EXTENT_IO_TREE_H + +struct extent_changeset; + +/* Bits for the extent state */ +#define EXTENT_DIRTY (1U << 0) +#define EXTENT_UPTODATE (1U << 1) +#define EXTENT_LOCKED (1U << 2) +#define EXTENT_NEW (1U << 3) +#define EXTENT_DELALLOC (1U << 4) +#define EXTENT_DEFRAG (1U << 5) +#define EXTENT_BOUNDARY (1U << 6) +#define EXTENT_NODATASUM (1U << 7) +#define EXTENT_CLEAR_META_RESV (1U << 8) +#define EXTENT_NEED_WAIT (1U << 9) +#define EXTENT_DAMAGED (1U << 10) +#define EXTENT_NORESERVE (1U << 11) +#define EXTENT_QGROUP_RESERVED (1U << 12) +#define EXTENT_CLEAR_DATA_RESV (1U << 13) +#define EXTENT_DELALLOC_NEW (1U << 14) +#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \ + EXTENT_CLEAR_DATA_RESV) +#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING) + +/* + * Redefined bits above which are used only in the device allocation tree, + * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV + * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit + * manipulation functions + */ +#define CHUNK_ALLOCATED EXTENT_DIRTY +#define CHUNK_TRIMMED EXTENT_DEFRAG + +enum { + IO_TREE_FS_INFO_FREED_EXTENTS0, + IO_TREE_FS_INFO_FREED_EXTENTS1, + IO_TREE_INODE_IO, + IO_TREE_INODE_IO_FAILURE, + IO_TREE_RELOC_BLOCKS, + IO_TREE_TRANS_DIRTY_PAGES, + IO_TREE_ROOT_DIRTY_LOG_PAGES, + IO_TREE_SELFTEST, +}; + +struct extent_io_tree { + struct rb_root state; + struct btrfs_fs_info *fs_info; + void *private_data; + u64 dirty_bytes; + bool track_uptodate; + + /* Who owns this io tree, should be one of IO_TREE_* */ + u8 owner; + + spinlock_t lock; + const struct extent_io_ops *ops; +}; + +struct extent_state { + u64 start; + u64 end; /* inclusive */ + struct rb_node rb_node; + + /* ADD NEW ELEMENTS AFTER THIS */ + wait_queue_head_t wq; + refcount_t refs; + unsigned state; + + struct io_failure_record *failrec; + +#ifdef CONFIG_BTRFS_DEBUG + struct list_head leak_list; +#endif +}; + +int __init extent_state_cache_init(void); +void __cold extent_state_cache_exit(void); + +void extent_io_tree_init(struct btrfs_fs_info *fs_info, + struct extent_io_tree *tree, unsigned int owner, + void *private_data); +void extent_io_tree_release(struct extent_io_tree *tree); + +int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + struct extent_state **cached); + +static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) +{ + return lock_extent_bits(tree, start, end, NULL); +} + +int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end); + +int __init extent_io_init(void); +void __cold extent_io_exit(void); + +u64 count_range_bits(struct 
extent_io_tree *tree, + u64 *start, u64 search_end, + u64 max_bytes, unsigned bits, int contig); + +void free_extent_state(struct extent_state *state); +int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, int filled, + struct extent_state *cached_state); +int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, struct extent_changeset *changeset); +int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, int wake, int delete, + struct extent_state **cached); +int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, int wake, int delete, + struct extent_state **cached, gfp_t mask, + struct extent_changeset *changeset); + +static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) +{ + return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL); +} + +static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached) +{ + return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, + GFP_NOFS, NULL); +} + +static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree, + u64 start, u64 end, struct extent_state **cached) +{ + return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, + GFP_ATOMIC, NULL); +} + +static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, + u64 end, unsigned bits) +{ + int wake = 0; + + if (bits & EXTENT_LOCKED) + wake = 1; + + return clear_extent_bit(tree, start, end, bits, wake, 0, NULL); +} + +int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, struct extent_changeset *changeset); +int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, u64 *failed_start, + struct extent_state **cached_state, gfp_t mask); +int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits); + +static inline int set_extent_bits(struct extent_io_tree *tree, u64 start, + u64 end, unsigned bits) +{ + return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS); +} + +static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached_state) +{ + return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, + cached_state, GFP_NOFS, NULL); +} + +static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start, + u64 end, gfp_t mask) +{ + return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, + NULL, mask); +} + +static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached) +{ + return clear_extent_bit(tree, start, end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, cached); +} + +int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, unsigned clear_bits, + struct extent_state **cached_state); + +static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start, + u64 end, unsigned int extra_bits, + struct extent_state **cached_state) +{ + return set_extent_bit(tree, start, end, + EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits, + NULL, cached_state, GFP_NOFS); +} + +static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached_state) +{ + return set_extent_bit(tree, start, end, + EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG, + NULL, cached_state, GFP_NOFS); +} + +static 
inline int set_extent_new(struct extent_io_tree *tree, u64 start, + u64 end) +{ + return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, + GFP_NOFS); +} + +static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached_state, gfp_t mask) +{ + return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, + cached_state, mask); +} + +int find_first_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, unsigned bits, + struct extent_state **cached_state); +void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, unsigned bits); +int extent_invalidatepage(struct extent_io_tree *tree, + struct page *page, unsigned long offset); + +#endif /* BTRFS_EXTENT_IO_TREE_H */ diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index dffa7c6373e1..09fd5e740fa5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -14,6 +14,7 @@ #include #include #include "extent_io.h" +#include "extent-io-tree.h" #include "extent_map.h" #include "ctree.h" #include "btrfs_inode.h" diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index e813f593202d..8c782d061132 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -7,35 +7,6 @@ #include #include "ulist.h" -/* bits for the extent state */ -#define EXTENT_DIRTY (1U << 0) -#define EXTENT_UPTODATE (1U << 1) -#define EXTENT_LOCKED (1U << 2) -#define EXTENT_NEW (1U << 3) -#define EXTENT_DELALLOC (1U << 4) -#define EXTENT_DEFRAG (1U << 5) -#define EXTENT_BOUNDARY (1U << 6) -#define EXTENT_NODATASUM (1U << 7) -#define EXTENT_CLEAR_META_RESV (1U << 8) -#define EXTENT_NEED_WAIT (1U << 9) -#define EXTENT_DAMAGED (1U << 10) -#define EXTENT_NORESERVE (1U << 11) -#define EXTENT_QGROUP_RESERVED (1U << 12) -#define EXTENT_CLEAR_DATA_RESV (1U << 13) -#define EXTENT_DELALLOC_NEW (1U << 14) -#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \ - EXTENT_CLEAR_DATA_RESV) -#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING) - -/* - * Redefined bits above which are used only in the device allocation tree, - * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV - * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit - * manipulation functions - */ -#define CHUNK_ALLOCATED EXTENT_DIRTY -#define CHUNK_TRIMMED EXTENT_DEFRAG - /* * flags for bio submission. 
The high bits indicate the compression * type for this bio @@ -89,12 +60,11 @@ enum { #define BITMAP_LAST_BYTE_MASK(nbits) \ (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1))) -struct extent_state; struct btrfs_root; struct btrfs_inode; struct btrfs_io_bio; struct io_failure_record; - +struct extent_io_tree; typedef blk_status_t (extent_submit_bio_start_t)(void *private_data, struct bio *bio, u64 bio_offset); @@ -111,47 +81,6 @@ struct extent_io_ops { int mirror); }; -enum { - IO_TREE_FS_INFO_FREED_EXTENTS0, - IO_TREE_FS_INFO_FREED_EXTENTS1, - IO_TREE_INODE_IO, - IO_TREE_INODE_IO_FAILURE, - IO_TREE_RELOC_BLOCKS, - IO_TREE_TRANS_DIRTY_PAGES, - IO_TREE_ROOT_DIRTY_LOG_PAGES, - IO_TREE_SELFTEST, -}; - -struct extent_io_tree { - struct rb_root state; - struct btrfs_fs_info *fs_info; - void *private_data; - u64 dirty_bytes; - bool track_uptodate; - - /* Who owns this io tree, should be one of IO_TREE_* */ - u8 owner; - - spinlock_t lock; - const struct extent_io_ops *ops; -}; - -struct extent_state { - u64 start; - u64 end; /* inclusive */ - struct rb_node rb_node; - - /* ADD NEW ELEMENTS AFTER THIS */ - wait_queue_head_t wq; - refcount_t refs; - unsigned state; - - struct io_failure_record *failrec; - -#ifdef CONFIG_BTRFS_DEBUG - struct list_head leak_list; -#endif -}; #define INLINE_EXTENT_BUFFER_PAGES 16 #define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE) @@ -259,152 +188,11 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode, u64 start, u64 len, int create); -void extent_io_tree_init(struct btrfs_fs_info *fs_info, - struct extent_io_tree *tree, unsigned int owner, - void *private_data); -void extent_io_tree_release(struct extent_io_tree *tree); int try_release_extent_mapping(struct page *page, gfp_t mask); int try_release_extent_buffer(struct page *page); -int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - struct extent_state **cached); -static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) -{ - return lock_extent_bits(tree, start, end, NULL); -} - -int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end); int extent_read_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, int mirror_num); -int __init extent_io_init(void); -void __cold extent_io_exit(void); - -u64 count_range_bits(struct extent_io_tree *tree, - u64 *start, u64 search_end, - u64 max_bytes, unsigned bits, int contig); - -void free_extent_state(struct extent_state *state); -int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, int filled, - struct extent_state *cached_state); -int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, struct extent_changeset *changeset); -int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, int wake, int delete, - struct extent_state **cached); -int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, int wake, int delete, - struct extent_state **cached, gfp_t mask, - struct extent_changeset *changeset); - -static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) -{ - return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL); -} - -static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached) -{ - return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, - GFP_NOFS, NULL); -} - -static inline int 
unlock_extent_cached_atomic(struct extent_io_tree *tree, - u64 start, u64 end, struct extent_state **cached) -{ - return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, - GFP_ATOMIC, NULL); -} - -static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, - u64 end, unsigned bits) -{ - int wake = 0; - - if (bits & EXTENT_LOCKED) - wake = 1; - - return clear_extent_bit(tree, start, end, bits, wake, 0, NULL); -} - -int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, struct extent_changeset *changeset); -int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, u64 *failed_start, - struct extent_state **cached_state, gfp_t mask); -int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits); - -static inline int set_extent_bits(struct extent_io_tree *tree, u64 start, - u64 end, unsigned bits) -{ - return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS); -} - -static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached_state) -{ - return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, - cached_state, GFP_NOFS, NULL); -} - -static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start, - u64 end, gfp_t mask) -{ - return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, - NULL, mask); -} - -static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached) -{ - return clear_extent_bit(tree, start, end, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, cached); -} - -int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, unsigned clear_bits, - struct extent_state **cached_state); - -static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start, - u64 end, unsigned int extra_bits, - struct extent_state **cached_state) -{ - return set_extent_bit(tree, start, end, - EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits, - NULL, cached_state, GFP_NOFS); -} - -static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached_state) -{ - return set_extent_bit(tree, start, end, - EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG, - NULL, cached_state, GFP_NOFS); -} - -static inline int set_extent_new(struct extent_io_tree *tree, u64 start, - u64 end) -{ - return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, - GFP_NOFS); -} - -static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached_state, gfp_t mask) -{ - return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, - cached_state, mask); -} - -int find_first_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, unsigned bits, - struct extent_state **cached_state); -void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, unsigned bits); -int extent_invalidatepage(struct extent_io_tree *tree, - struct page *page, unsigned long offset); int extent_write_full_page(struct page *page, struct writeback_control *wbc); int extent_write_locked_range(struct inode *inode, u64 start, u64 end, int mode); @@ -555,7 +343,4 @@ bool find_lock_delalloc_range(struct inode *inode, #endif struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, u64 start); - -int __init extent_state_cache_init(void); -void __cold 
extent_state_cache_exit(void); #endif -- cgit v1.2.3-59-g8ed1b From 083e75e7e6a9e24b041ed9cc668dc7a500e31919 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 23 Sep 2019 10:05:20 -0400 Subject: btrfs: export find_delalloc_range This utilizes internal stuff to the extent_io_tree, so we need to export it before we move it. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent-io-tree.h | 3 +++ fs/btrfs/extent_io.c | 10 +++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h index 83f0f2438214..3de980c44d5e 100644 --- a/fs/btrfs/extent-io-tree.h +++ b/fs/btrfs/extent-io-tree.h @@ -223,5 +223,8 @@ void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, unsigned bits); int extent_invalidatepage(struct extent_io_tree *tree, struct page *page, unsigned long offset); +bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, + u64 *end, u64 max_bytes, + struct extent_state **cached_state); #endif /* BTRFS_EXTENT_IO_TREE_H */ diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 09fd5e740fa5..5aabbd999bd5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1686,9 +1686,9 @@ out: * * true is returned if we find something, false if nothing was in the tree */ -static noinline bool find_delalloc_range(struct extent_io_tree *tree, - u64 *start, u64 *end, u64 max_bytes, - struct extent_state **cached_state) +bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, + u64 *end, u64 max_bytes, + struct extent_state **cached_state) { struct rb_node *node; struct extent_state *state; @@ -1806,8 +1806,8 @@ again: /* step one, find a bunch of delalloc bytes starting at start */ delalloc_start = *start; delalloc_end = 0; - found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, - max_bytes, &cached_state); + found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end, + max_bytes, &cached_state); if (!found || delalloc_end <= *start) { *start = delalloc_start; *end = delalloc_end; -- cgit v1.2.3-59-g8ed1b From b3f167aa6c7053b87fa53364fc40dd4757f053c9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 23 Sep 2019 10:05:21 -0400 Subject: btrfs: move the failrec tree stuff into extent-io-tree.h This needs to be cleaned up in the future, but for now it belongs to the extent-io-tree stuff since it uses the internal tree search code. Needed to export get_state_failrec and set_state_failrec as well since we're not going to move the actual IO part of the failrec stuff out at this point. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent-io-tree.h | 18 ++++++++++++++++++ fs/btrfs/extent_io.c | 8 ++++---- fs/btrfs/extent_io.h | 11 ----------- 3 files changed, 22 insertions(+), 15 deletions(-) diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h index 3de980c44d5e..a3febe746c79 100644 --- a/fs/btrfs/extent-io-tree.h +++ b/fs/btrfs/extent-io-tree.h @@ -4,6 +4,7 @@ #define BTRFS_EXTENT_IO_TREE_H struct extent_changeset; +struct io_failure_record; /* Bits for the extent state */ #define EXTENT_DIRTY (1U << 0) @@ -227,4 +228,21 @@ bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, u64 *end, u64 max_bytes, struct extent_state **cached_state); +/* This should be reworked in the future and put elsewhere. 
*/ +int get_state_failrec(struct extent_io_tree *tree, u64 start, + struct io_failure_record **failrec); +int set_state_failrec(struct extent_io_tree *tree, u64 start, + struct io_failure_record *failrec); +void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, + u64 end); +int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, + struct io_failure_record **failrec_ret); +int free_io_failure(struct extent_io_tree *failure_tree, + struct extent_io_tree *io_tree, + struct io_failure_record *rec); +int clean_io_failure(struct btrfs_fs_info *fs_info, + struct extent_io_tree *failure_tree, + struct extent_io_tree *io_tree, u64 start, + struct page *page, u64 ino, unsigned int pg_offset); + #endif /* BTRFS_EXTENT_IO_TREE_H */ diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5aabbd999bd5..0f1d917c8bb3 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2024,8 +2024,8 @@ out: * set the private field for a given byte offset in the tree. If there isn't * an extent_state there already, this does nothing. */ -static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start, - struct io_failure_record *failrec) +int set_state_failrec(struct extent_io_tree *tree, u64 start, + struct io_failure_record *failrec) { struct rb_node *node; struct extent_state *state; @@ -2052,8 +2052,8 @@ out: return ret; } -static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start, - struct io_failure_record **failrec) +int get_state_failrec(struct extent_io_tree *tree, u64 start, + struct io_failure_record **failrec) { struct rb_node *node; struct extent_state *state; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 8c782d061132..e22045cef89b 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -296,10 +296,6 @@ struct btrfs_inode; int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, u64 length, u64 logical, struct page *page, unsigned int pg_offset, int mirror_num); -int clean_io_failure(struct btrfs_fs_info *fs_info, - struct extent_io_tree *failure_tree, - struct extent_io_tree *io_tree, u64 start, - struct page *page, u64 ino, unsigned int pg_offset); void end_extent_writepage(struct page *page, int err, u64 start, u64 end); int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num); @@ -323,19 +319,12 @@ struct io_failure_record { }; -void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, - u64 end); -int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, - struct io_failure_record **failrec_ret); bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages, struct io_failure_record *failrec, int fail_mirror); struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, struct io_failure_record *failrec, struct page *page, int pg_offset, int icsum, bio_end_io_t *endio_func, void *data); -int free_io_failure(struct extent_io_tree *failure_tree, - struct extent_io_tree *io_tree, - struct io_failure_record *rec); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS bool find_lock_delalloc_range(struct inode *inode, struct page *locked_page, u64 *start, -- cgit v1.2.3-59-g8ed1b From cdc6f1668e1f41b9fbdb0ae075b789c5fa0bec4b Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 16 Sep 2019 11:30:52 -0700 Subject: btrfs: get rid of unnecessary memset() of work item Commit fc97fab0ea59 ("btrfs: Replace fs_info->qgroup_rescan_worker workqueue with btrfs_workqueue.") converted qgroup_rescan_work to be initialized with 
btrfs_init_work(), but it left behind an unnecessary memset(). Get rid of the memset(). Reviewed-by: Johannes Thumshirn Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/qgroup.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 3ad151655eb8..55678837bb84 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -3277,8 +3277,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, spin_unlock(&fs_info->qgroup_lock); mutex_unlock(&fs_info->qgroup_rescan_lock); - memset(&fs_info->qgroup_rescan_work, 0, - sizeof(fs_info->qgroup_rescan_work)); btrfs_init_work(&fs_info->qgroup_rescan_work, btrfs_qgroup_rescan_helper, btrfs_qgroup_rescan_worker, NULL, NULL); -- cgit v1.2.3-59-g8ed1b From c495dcd6fbe1dce51811a76bb85b4675f6494938 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 16 Sep 2019 11:30:53 -0700 Subject: btrfs: don't prematurely free work in run_ordered_work() We hit the following very strange deadlock on a system with Btrfs on a loop device backed by another Btrfs filesystem: 1. The top (loop device) filesystem queues an async_cow work item from cow_file_range_async(). We'll call this work X. 2. Worker thread A starts work X (normal_work_helper()). 3. Worker thread A executes the ordered work for the top filesystem (run_ordered_work()). 4. Worker thread A finishes the ordered work for work X and frees X (work->ordered_free()). 5. Worker thread A executes another ordered work and gets blocked on I/O to the bottom filesystem (still in run_ordered_work()). 6. Meanwhile, the bottom filesystem allocates and queues an async_cow work item which happens to be the recently-freed X. 7. The workqueue code sees that X is already being executed by worker thread A, so it schedules X to be executed _after_ worker thread A finishes (see the find_worker_executing_work() call in process_one_work()). Now, the top filesystem is waiting for I/O on the bottom filesystem, but the bottom filesystem is waiting for the top filesystem to finish, so we deadlock. This happens because we are breaking the workqueue assumption that a work item cannot be recycled while it still depends on other work. Fix it by waiting to free the work item until we are done with all of the related ordered work. P.S.: One might ask why the workqueue code doesn't try to detect a recycled work item. It actually does try by checking whether the work item has the same work function (find_worker_executing_work()), but in our case the function is the same. This is the only key that the workqueue code has available to compare, short of adding an additional, layer-violating "custom key". Considering that we're the only ones that have ever hit this, we should just play by the rules. Unfortunately, we haven't been able to create a minimal reproducer other than our full container setup using a compress-force=zstd filesystem on top of another compress-force=zstd filesystem. 
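The shape of the fix can be modelled in standalone C (a simplified sketch only; the types and helpers below are stand-ins, not the btrfs_workqueue API): while draining the ordered list, the item currently being executed is only remembered, and it is freed after everything else:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal model: never free the item we are currently executing early. */
    struct work {
            struct work *next;
            bool done;
    };

    static void free_work(struct work *w)
    {
            printf("freeing %p\n", (void *)w);
            free(w);
    }

    static void run_ordered_work(struct work **list, struct work *self)
    {
            bool free_self = false;

            while (*list && (*list)->done) {
                    struct work *w = *list;

                    *list = w->next;
                    if (w == self)
                            free_self = true;  /* defer: we are still running it */
                    else
                            free_work(w);
            }

            if (free_self)
                    free_work(self);  /* now it can no longer be recycled against us */
    }

    int main(void)
    {
            struct work *self = calloc(1, sizeof(*self));
            struct work *other = calloc(1, sizeof(*other));
            struct work *list;

            self->done = true;
            other->done = true;
            self->next = other;
            list = self;

            /* pretend we are the worker currently executing "self" */
            run_ordered_work(&list, self);
            return 0;
    }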
Suggested-by: Tejun Heo Reviewed-by: Johannes Thumshirn Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/async-thread.c | 56 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 44 insertions(+), 12 deletions(-) diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 2e9e13ffbd08..10a04b99798a 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -252,16 +252,17 @@ out: } } -static void run_ordered_work(struct __btrfs_workqueue *wq) +static void run_ordered_work(struct __btrfs_workqueue *wq, + struct btrfs_work *self) { struct list_head *list = &wq->ordered_list; struct btrfs_work *work; spinlock_t *lock = &wq->list_lock; unsigned long flags; + void *wtag; + bool free_self = false; while (1) { - void *wtag; - spin_lock_irqsave(lock, flags); if (list_empty(list)) break; @@ -287,16 +288,47 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) list_del(&work->ordered_list); spin_unlock_irqrestore(lock, flags); - /* - * We don't want to call the ordered free functions with the - * lock held though. Save the work as tag for the trace event, - * because the callback could free the structure. - */ - wtag = work; - work->ordered_free(work); - trace_btrfs_all_work_done(wq->fs_info, wtag); + if (work == self) { + /* + * This is the work item that the worker is currently + * executing. + * + * The kernel workqueue code guarantees non-reentrancy + * of work items. I.e., if a work item with the same + * address and work function is queued twice, the second + * execution is blocked until the first one finishes. A + * work item may be freed and recycled with the same + * work function; the workqueue code assumes that the + * original work item cannot depend on the recycled work + * item in that case (see find_worker_executing_work()). + * + * Note that the work of one Btrfs filesystem may depend + * on the work of another Btrfs filesystem via, e.g., a + * loop device. Therefore, we must not allow the current + * work item to be recycled until we are really done, + * otherwise we break the above assumption and can + * deadlock. + */ + free_self = true; + } else { + /* + * We don't want to call the ordered free functions with + * the lock held though. Save the work as tag for the + * trace event, because the callback could free the + * structure. + */ + wtag = work; + work->ordered_free(work); + trace_btrfs_all_work_done(wq->fs_info, wtag); + } } spin_unlock_irqrestore(lock, flags); + + if (free_self) { + wtag = self; + self->ordered_free(self); + trace_btrfs_all_work_done(wq->fs_info, wtag); + } } static void normal_work_helper(struct btrfs_work *work) @@ -324,7 +356,7 @@ static void normal_work_helper(struct btrfs_work *work) work->func(work); if (need_order) { set_bit(WORK_DONE_BIT, &work->flags); - run_ordered_work(wq); + run_ordered_work(wq, work); } if (!need_order) trace_btrfs_all_work_done(wq->fs_info, wtag); -- cgit v1.2.3-59-g8ed1b From 9be490f1e15c34193b1aae17da58e14dd9f55a95 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 16 Sep 2019 11:30:54 -0700 Subject: btrfs: don't prematurely free work in end_workqueue_fn() Currently, end_workqueue_fn() frees the end_io_wq entry (which embeds the work item) and then calls bio_endio(). This is another potential instance of the bug in "btrfs: don't prematurely free work in run_ordered_work()". In particular, the endio call may depend on other work items. 
For example, btrfs_end_dio_bio() can call btrfs_subio_endio_read() -> __btrfs_correct_data_nocsum() -> dio_read_error() -> submit_dio_repair_bio(), which submits a bio that is also completed through a end_workqueue_fn() work item. However, __btrfs_correct_data_nocsum() waits for the newly submitted bio to complete, thus it depends on another work item. This example currently usually works because we use different workqueue helper functions for BTRFS_WQ_ENDIO_DATA and BTRFS_WQ_ENDIO_DIO_REPAIR. However, it may deadlock with stacked filesystems and is fragile overall. The proper fix is to free the work item at the very end of the work function, so let's do that. Reviewed-by: Johannes Thumshirn Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 402b61bf345c..3895c21853cc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1657,8 +1657,8 @@ static void end_workqueue_fn(struct btrfs_work *work) bio->bi_status = end_io_wq->status; bio->bi_private = end_io_wq->private; bio->bi_end_io = end_io_wq->end_io; - kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); bio_endio(bio); + kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); } static int cleaner_kthread(void *arg) -- cgit v1.2.3-59-g8ed1b From e732fe95e4cad35fc1df278c23a32903341b08b3 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 16 Sep 2019 11:30:55 -0700 Subject: btrfs: don't prematurely free work in reada_start_machine_worker() Currently, reada_start_machine_worker() frees the reada_machine_work and then calls __reada_start_machine() to do readahead. This is another potential instance of the bug in "btrfs: don't prematurely free work in run_ordered_work()". There _might_ already be a deadlock here: reada_start_machine_worker() can depend on itself through stacked filesystems (__read_start_machine() -> reada_start_machine_dev() -> reada_tree_block_flagged() -> read_extent_buffer_pages() -> submit_one_bio() -> btree_submit_bio_hook() -> btrfs_map_bio() -> submit_stripe_bio() -> submit_bio() onto a loop device can trigger readahead on the lower filesystem). Either way, let's fix it by freeing the work at the end. 
Reviewed-by: Johannes Thumshirn Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/reada.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index ee6f60547a8d..dd4f9c2b7107 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -752,21 +752,19 @@ static int reada_start_machine_dev(struct btrfs_device *dev) static void reada_start_machine_worker(struct btrfs_work *work) { struct reada_machine_work *rmw; - struct btrfs_fs_info *fs_info; int old_ioprio; rmw = container_of(work, struct reada_machine_work, work); - fs_info = rmw->fs_info; - - kfree(rmw); old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current), task_nice_ioprio(current)); set_task_ioprio(current, BTRFS_IOPRIO_READA); - __reada_start_machine(fs_info); + __reada_start_machine(rmw->fs_info); set_task_ioprio(current, old_ioprio); - atomic_dec(&fs_info->reada_works_cnt); + atomic_dec(&rmw->fs_info->reada_works_cnt); + + kfree(rmw); } static void __reada_start_machine(struct btrfs_fs_info *fs_info) -- cgit v1.2.3-59-g8ed1b From 57d4f0b863272ba04ba85f86bfdc0f976f0af91c Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 16 Sep 2019 11:30:56 -0700 Subject: btrfs: don't prematurely free work in scrub_missing_raid56_worker() Currently, scrub_missing_raid56_worker() puts and potentially frees sblock (which embeds the work item) and then submits a bio through scrub_wr_submit(). This is another potential instance of the bug in "btrfs: don't prematurely free work in run_ordered_work()". Fix it by dropping the reference after we submit the bio. Reviewed-by: Johannes Thumshirn Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/scrub.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index f7d4e03f4c5d..a0770a6aee00 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -2149,14 +2149,13 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work) scrub_write_block_to_dev_replace(sblock); } - scrub_block_put(sblock); - if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } + scrub_block_put(sblock); scrub_pending_bio_dec(sctx); } -- cgit v1.2.3-59-g8ed1b From a0cac0ec961f0d42828eeef196ac2246a2f07659 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 16 Sep 2019 11:30:57 -0700 Subject: btrfs: get rid of unique workqueue helper functions Commit 9e0af2376434 ("Btrfs: fix task hang under heavy compressed write") worked around the issue that a recycled work item could get a false dependency on the original work item due to how the workqueue code guarantees non-reentrancy. It did so by giving different work functions to different types of work. However, the fixes in the previous few patches are more complete, as they prevent a work item from being recycled at all (except for a tiny window that the kernel workqueue code handles for us). This obsoletes the previous fix, so we don't need the unique helpers for correctness. The only other reason to keep them would be so they show up in stack traces, but they always seem to be optimized to a tail call, so they don't show up anyways. So, let's just get rid of the extra indirection. While we're here, rename normal_work_helper() to the more informative btrfs_work_helper(). 
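For illustration, the call-site change lifted from the block-group.c hunk in the diff below: the per-type helper argument disappears and btrfs_init_work() wires up btrfs_work_helper() internally:

    /* before: every call site had to pass a matching *_helper wrapper */
    btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                    caching_thread, NULL, NULL);

    /* after: the wrapper argument is gone, btrfs_work_helper() is used internally */
    btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);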
Reviewed-by: Nikolay Borisov Reviewed-by: Filipe Manana Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/async-thread.c | 58 +++++++++++------------------------------------- fs/btrfs/async-thread.h | 33 ++------------------------- fs/btrfs/block-group.c | 3 +-- fs/btrfs/delayed-inode.c | 4 ++-- fs/btrfs/disk-io.c | 34 +++++++++------------------- fs/btrfs/inode.c | 36 +++++++++--------------------- fs/btrfs/ordered-data.c | 1 - fs/btrfs/qgroup.c | 1 - fs/btrfs/raid56.c | 5 ++--- fs/btrfs/reada.c | 3 +-- fs/btrfs/scrub.c | 14 +++++------- fs/btrfs/volumes.c | 3 +-- 12 files changed, 50 insertions(+), 145 deletions(-) diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 10a04b99798a..3f3110975f88 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -53,16 +53,6 @@ struct btrfs_workqueue { struct __btrfs_workqueue *high; }; -static void normal_work_helper(struct btrfs_work *work); - -#define BTRFS_WORK_HELPER(name) \ -noinline_for_stack void btrfs_##name(struct work_struct *arg) \ -{ \ - struct btrfs_work *work = container_of(arg, struct btrfs_work, \ - normal_work); \ - normal_work_helper(work); \ -} - struct btrfs_fs_info * btrfs_workqueue_owner(const struct __btrfs_workqueue *wq) { @@ -89,29 +79,6 @@ bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq) return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; } -BTRFS_WORK_HELPER(worker_helper); -BTRFS_WORK_HELPER(delalloc_helper); -BTRFS_WORK_HELPER(flush_delalloc_helper); -BTRFS_WORK_HELPER(cache_helper); -BTRFS_WORK_HELPER(submit_helper); -BTRFS_WORK_HELPER(fixup_helper); -BTRFS_WORK_HELPER(endio_helper); -BTRFS_WORK_HELPER(endio_meta_helper); -BTRFS_WORK_HELPER(endio_meta_write_helper); -BTRFS_WORK_HELPER(endio_raid56_helper); -BTRFS_WORK_HELPER(endio_repair_helper); -BTRFS_WORK_HELPER(rmw_helper); -BTRFS_WORK_HELPER(endio_write_helper); -BTRFS_WORK_HELPER(freespace_write_helper); -BTRFS_WORK_HELPER(delayed_meta_helper); -BTRFS_WORK_HELPER(readahead_helper); -BTRFS_WORK_HELPER(qgroup_rescan_helper); -BTRFS_WORK_HELPER(extent_refs_helper); -BTRFS_WORK_HELPER(scrub_helper); -BTRFS_WORK_HELPER(scrubwrc_helper); -BTRFS_WORK_HELPER(scrubnc_helper); -BTRFS_WORK_HELPER(scrubparity_helper); - static struct __btrfs_workqueue * __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, unsigned int flags, int limit_active, int thresh) @@ -302,12 +269,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq, * original work item cannot depend on the recycled work * item in that case (see find_worker_executing_work()). * - * Note that the work of one Btrfs filesystem may depend - * on the work of another Btrfs filesystem via, e.g., a - * loop device. Therefore, we must not allow the current - * work item to be recycled until we are really done, - * otherwise we break the above assumption and can - * deadlock. + * Note that different types of Btrfs work can depend on + * each other, and one type of work on one Btrfs + * filesystem may even depend on the same type of work + * on another Btrfs filesystem via, e.g., a loop device. + * Therefore, we must not allow the current work item to + * be recycled until we are really done, otherwise we + * break the above assumption and can deadlock. 
*/ free_self = true; } else { @@ -331,8 +299,10 @@ static void run_ordered_work(struct __btrfs_workqueue *wq, } } -static void normal_work_helper(struct btrfs_work *work) +static void btrfs_work_helper(struct work_struct *normal_work) { + struct btrfs_work *work = container_of(normal_work, struct btrfs_work, + normal_work); struct __btrfs_workqueue *wq; void *wtag; int need_order = 0; @@ -362,15 +332,13 @@ static void normal_work_helper(struct btrfs_work *work) trace_btrfs_all_work_done(wq->fs_info, wtag); } -void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, - btrfs_func_t func, - btrfs_func_t ordered_func, - btrfs_func_t ordered_free) +void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, + btrfs_func_t ordered_func, btrfs_func_t ordered_free) { work->func = func; work->ordered_func = ordered_func; work->ordered_free = ordered_free; - INIT_WORK(&work->normal_work, uniq_func); + INIT_WORK(&work->normal_work, btrfs_work_helper); INIT_LIST_HEAD(&work->ordered_list); work->flags = 0; } diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index 7861c9feba5f..c5bf2b117c05 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -29,42 +29,13 @@ struct btrfs_work { unsigned long flags; }; -#define BTRFS_WORK_HELPER_PROTO(name) \ -void btrfs_##name(struct work_struct *arg) - -BTRFS_WORK_HELPER_PROTO(worker_helper); -BTRFS_WORK_HELPER_PROTO(delalloc_helper); -BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper); -BTRFS_WORK_HELPER_PROTO(cache_helper); -BTRFS_WORK_HELPER_PROTO(submit_helper); -BTRFS_WORK_HELPER_PROTO(fixup_helper); -BTRFS_WORK_HELPER_PROTO(endio_helper); -BTRFS_WORK_HELPER_PROTO(endio_meta_helper); -BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper); -BTRFS_WORK_HELPER_PROTO(endio_raid56_helper); -BTRFS_WORK_HELPER_PROTO(endio_repair_helper); -BTRFS_WORK_HELPER_PROTO(rmw_helper); -BTRFS_WORK_HELPER_PROTO(endio_write_helper); -BTRFS_WORK_HELPER_PROTO(freespace_write_helper); -BTRFS_WORK_HELPER_PROTO(delayed_meta_helper); -BTRFS_WORK_HELPER_PROTO(readahead_helper); -BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper); -BTRFS_WORK_HELPER_PROTO(extent_refs_helper); -BTRFS_WORK_HELPER_PROTO(scrub_helper); -BTRFS_WORK_HELPER_PROTO(scrubwrc_helper); -BTRFS_WORK_HELPER_PROTO(scrubnc_helper); -BTRFS_WORK_HELPER_PROTO(scrubparity_helper); - - struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, unsigned int flags, int limit_active, int thresh); -void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper, - btrfs_func_t func, - btrfs_func_t ordered_func, - btrfs_func_t ordered_free); +void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, + btrfs_func_t ordered_func, btrfs_func_t ordered_free); void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work); void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 670700cb1110..53e08f925260 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -695,8 +695,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, caching_ctl->block_group = cache; caching_ctl->progress = cache->key.objectid; refcount_set(&caching_ctl->count, 1); - btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, - caching_thread, NULL, NULL); + btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); spin_lock(&cache->lock); /* diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 1f7f39b10bd0..49ec3402886a 100644 --- 
a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1367,8 +1367,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, return -ENOMEM; async_work->delayed_root = delayed_root; - btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper, - btrfs_async_run_delayed_root, NULL, NULL); + btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL, + NULL); async_work->nr = nr; btrfs_queue_work(fs_info->delayed_workers, &async_work->work); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3895c21853cc..bae334212ee2 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -706,43 +706,31 @@ static void end_workqueue_bio(struct bio *bio) struct btrfs_end_io_wq *end_io_wq = bio->bi_private; struct btrfs_fs_info *fs_info; struct btrfs_workqueue *wq; - btrfs_work_func_t func; fs_info = end_io_wq->info; end_io_wq->status = bio->bi_status; if (bio_op(bio) == REQ_OP_WRITE) { - if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) wq = fs_info->endio_meta_write_workers; - func = btrfs_endio_meta_write_helper; - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) { + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) wq = fs_info->endio_freespace_worker; - func = btrfs_freespace_write_helper; - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) wq = fs_info->endio_raid56_workers; - func = btrfs_endio_raid56_helper; - } else { + else wq = fs_info->endio_write_workers; - func = btrfs_endio_write_helper; - } } else { - if (unlikely(end_io_wq->metadata == - BTRFS_WQ_ENDIO_DIO_REPAIR)) { + if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR)) wq = fs_info->endio_repair_workers; - func = btrfs_endio_repair_helper; - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) wq = fs_info->endio_raid56_workers; - func = btrfs_endio_raid56_helper; - } else if (end_io_wq->metadata) { + else if (end_io_wq->metadata) wq = fs_info->endio_meta_workers; - func = btrfs_endio_meta_helper; - } else { + else wq = fs_info->endio_workers; - func = btrfs_endio_helper; - } } - btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL); + btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL); btrfs_queue_work(wq, &end_io_wq->work); } @@ -835,8 +823,8 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, async->mirror_num = mirror_num; async->submit_bio_start = submit_bio_start; - btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start, - run_one_async_done, run_one_async_free); + btrfs_init_work(&async->work, run_one_async_start, run_one_async_done, + run_one_async_free); async->bio_offset = bio_offset; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 015910079e73..d3f7abf50c91 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1268,10 +1268,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, async_chunk[i].write_flags = write_flags; INIT_LIST_HEAD(&async_chunk[i].extents); - btrfs_init_work(&async_chunk[i].work, - btrfs_delalloc_helper, - async_cow_start, async_cow_submit, - async_cow_free); + btrfs_init_work(&async_chunk[i].work, async_cow_start, + async_cow_submit, async_cow_free); nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); atomic_add(nr_pages, &fs_info->async_delalloc_pages); @@ -2260,8 +2258,7 @@ int btrfs_writepage_cow_fixup(struct page *page, 
u64 start, u64 end) SetPageChecked(page); get_page(page); - btrfs_init_work(&fixup->work, btrfs_fixup_helper, - btrfs_writepage_fixup_worker, NULL, NULL); + btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); fixup->page = page; btrfs_queue_work(fs_info->fixup_workers, &fixup->work); return -EBUSY; @@ -3254,7 +3251,6 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ordered_extent *ordered_extent = NULL; struct btrfs_workqueue *wq; - btrfs_work_func_t func; trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); @@ -3263,16 +3259,12 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, end - start + 1, uptodate)) return; - if (btrfs_is_free_space_inode(BTRFS_I(inode))) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) wq = fs_info->endio_freespace_worker; - func = btrfs_freespace_write_helper; - } else { + else wq = fs_info->endio_write_workers; - func = btrfs_endio_write_helper; - } - btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, - NULL); + btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); btrfs_queue_work(wq, &ordered_extent->work); } @@ -8211,18 +8203,14 @@ static void __endio_write_update_ordered(struct inode *inode, struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ordered_extent *ordered = NULL; struct btrfs_workqueue *wq; - btrfs_work_func_t func; u64 ordered_offset = offset; u64 ordered_bytes = bytes; u64 last_offset; - if (btrfs_is_free_space_inode(BTRFS_I(inode))) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) wq = fs_info->endio_freespace_worker; - func = btrfs_freespace_write_helper; - } else { + else wq = fs_info->endio_write_workers; - func = btrfs_endio_write_helper; - } while (ordered_offset < offset + bytes) { last_offset = ordered_offset; @@ -8230,9 +8218,8 @@ static void __endio_write_update_ordered(struct inode *inode, &ordered_offset, ordered_bytes, uptodate)) { - btrfs_init_work(&ordered->work, func, - finish_ordered_fn, - NULL, NULL); + btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, + NULL); btrfs_queue_work(wq, &ordered->work); } /* @@ -10116,8 +10103,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode init_completion(&work->completion); INIT_LIST_HEAD(&work->list); work->inode = inode; - btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, - btrfs_run_delalloc_work, NULL, NULL); + btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); return work; } diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 24b6c72b9a59..6240a5a1f2c0 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -547,7 +547,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, spin_unlock(&root->ordered_extent_lock); btrfs_init_work(&ordered->flush_work, - btrfs_flush_delalloc_helper, btrfs_run_ordered_extent_work, NULL, NULL); list_add_tail(&ordered->work_list, &works); btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 55678837bb84..fde0973d893a 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -3278,7 +3278,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, mutex_unlock(&fs_info->qgroup_rescan_lock); btrfs_init_work(&fs_info->qgroup_rescan_work, - btrfs_qgroup_rescan_helper, btrfs_qgroup_rescan_worker, NULL, NULL); return 0; } diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c 
index 57a2ac721985..8f47a85944eb 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -190,7 +190,7 @@ static void scrub_parity_work(struct btrfs_work *work); static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func) { - btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL); + btrfs_init_work(&rbio->work, work_func, NULL, NULL); btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); } @@ -1743,8 +1743,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) plug = container_of(cb, struct btrfs_plug_cb, cb); if (from_schedule) { - btrfs_init_work(&plug->work, btrfs_rmw_helper, - unplug_work, NULL, NULL); + btrfs_init_work(&plug->work, unplug_work, NULL, NULL); btrfs_queue_work(plug->info->rmw_workers, &plug->work); return; diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index dd4f9c2b7107..1feaeadc8cf5 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -819,8 +819,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info) /* FIXME we cannot handle this properly right now */ BUG(); } - btrfs_init_work(&rmw->work, btrfs_readahead_helper, - reada_start_machine_worker, NULL, NULL); + btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL); rmw->fs_info = fs_info; btrfs_queue_work(fs_info->readahead_workers, &rmw->work); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index a0770a6aee00..a7b043fd7a57 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -598,8 +598,8 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( sbio->index = i; sbio->sctx = sctx; sbio->page_count = 0; - btrfs_init_work(&sbio->work, btrfs_scrub_helper, - scrub_bio_end_io_worker, NULL, NULL); + btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL, + NULL); if (i != SCRUB_BIOS_PER_SCTX - 1) sctx->bios[i]->next_free = i + 1; @@ -1720,8 +1720,7 @@ static void scrub_wr_bio_end_io(struct bio *bio) sbio->status = bio->bi_status; sbio->bio = bio; - btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, - scrub_wr_bio_end_io_worker, NULL, NULL); + btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); } @@ -2203,8 +2202,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock) raid56_add_scrub_pages(rbio, spage->page, spage->logical); } - btrfs_init_work(&sblock->work, btrfs_scrub_helper, - scrub_missing_raid56_worker, NULL, NULL); + btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL); scrub_block_get(sblock); scrub_pending_bio_inc(sctx); raid56_submit_missing_rbio(rbio); @@ -2742,8 +2740,8 @@ static void scrub_parity_bio_endio(struct bio *bio) bio_put(bio); - btrfs_init_work(&sparity->work, btrfs_scrubparity_helper, - scrub_parity_bio_endio_worker, NULL, NULL); + btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL, + NULL); btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e04409f85063..d8d7b1ee83ca 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6676,8 +6676,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, else generate_random_uuid(dev->uuid); - btrfs_init_work(&dev->work, btrfs_submit_helper, - pending_bios_fn, NULL, NULL); + btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL); return dev; } -- cgit v1.2.3-59-g8ed1b From c9eb55db8439057165f106164622c146cdd59468 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 16 Sep 2019 11:30:58 -0700 Subject: btrfs: get 
rid of pointless wtag variable in async-thread.c Commit ac0c7cf8be00 ("btrfs: fix crash when tracepoint arguments are freed by wq callbacks") added a void pointer, wtag, which is passed into trace_btrfs_all_work_done() instead of the freed work item. This is silly for a few reasons: 1. The freed work item still has the same address. 2. work is still in scope after it's freed, so assigning wtag doesn't stop anyone from using it. 3. The tracepoint has always taken a void * argument, so assigning wtag doesn't actually make things any more type-safe. (Note that the original bug in commit bc074524e123 ("btrfs: prefix fsid to all trace events") was that the void * was implicitly casted when it was passed to btrfs_work_owner() in the trace point itself). Instead, let's add some clearer warnings as comments. Reviewed-by: Nikolay Borisov Reviewed-by: Filipe Manana Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/async-thread.c | 21 ++++++++------------- include/trace/events/btrfs.h | 6 +++--- 2 files changed, 11 insertions(+), 16 deletions(-) diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 3f3110975f88..b97ae1b03417 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -226,7 +226,6 @@ static void run_ordered_work(struct __btrfs_workqueue *wq, struct btrfs_work *work; spinlock_t *lock = &wq->list_lock; unsigned long flags; - void *wtag; bool free_self = false; while (1) { @@ -281,21 +280,19 @@ static void run_ordered_work(struct __btrfs_workqueue *wq, } else { /* * We don't want to call the ordered free functions with - * the lock held though. Save the work as tag for the - * trace event, because the callback could free the - * structure. + * the lock held. */ - wtag = work; work->ordered_free(work); - trace_btrfs_all_work_done(wq->fs_info, wtag); + /* NB: work must not be dereferenced past this point. */ + trace_btrfs_all_work_done(wq->fs_info, work); } } spin_unlock_irqrestore(lock, flags); if (free_self) { - wtag = self; self->ordered_free(self); - trace_btrfs_all_work_done(wq->fs_info, wtag); + /* NB: self must not be dereferenced past this point. */ + trace_btrfs_all_work_done(wq->fs_info, self); } } @@ -304,7 +301,6 @@ static void btrfs_work_helper(struct work_struct *normal_work) struct btrfs_work *work = container_of(normal_work, struct btrfs_work, normal_work); struct __btrfs_workqueue *wq; - void *wtag; int need_order = 0; /* @@ -318,8 +314,6 @@ static void btrfs_work_helper(struct work_struct *normal_work) if (work->ordered_func) need_order = 1; wq = work->wq; - /* Safe for tracepoints in case work gets freed by the callback */ - wtag = work; trace_btrfs_work_sched(work); thresh_exec_hook(wq); @@ -327,9 +321,10 @@ static void btrfs_work_helper(struct work_struct *normal_work) if (need_order) { set_bit(WORK_DONE_BIT, &work->flags); run_ordered_work(wq, work); + } else { + /* NB: work must not be dereferenced past this point. 
*/ + trace_btrfs_all_work_done(wq->fs_info, work); } - if (!need_order) - trace_btrfs_all_work_done(wq->fs_info, wtag); } void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 75ae1899452b..8ca7401bc2fb 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1389,9 +1389,9 @@ DECLARE_EVENT_CLASS(btrfs__work, ); /* - * For situiations when the work is freed, we pass fs_info and a tag that that - * matches address of the work structure so it can be paired with the - * scheduling event. + * For situations when the work is freed, we pass fs_info and a tag that matches + * the address of the work structure so it can be paired with the scheduling + * event. DO NOT add anything here that dereferences wtag. */ DECLARE_EVENT_CLASS(btrfs__work__done, -- cgit v1.2.3-59-g8ed1b From d6156218bec93965b6a43ba2686ad962ce77c854 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 24 Sep 2019 18:29:10 +0200 Subject: btrfs: make locking assertion helpers static inline I've noticed that none of the btrfs_assert_*lock* debugging helpers is inlined, despite they're short and mostly a value update. Making them inline shaves 67 from the text size, reduces stack consumption and perhaps also slightly improves the performance due to avoiding unnecessary calls. Reviewed-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/locking.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 7f9a578a1a20..409c5a865079 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -14,55 +14,55 @@ #include "locking.h" #ifdef CONFIG_BTRFS_DEBUG -static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) +static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { WARN_ON(eb->spinning_writers); eb->spinning_writers++; } -static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) +static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { WARN_ON(eb->spinning_writers != 1); eb->spinning_writers--; } -static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) +static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { WARN_ON(eb->spinning_writers); } -static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) +static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { atomic_inc(&eb->spinning_readers); } -static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) +static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { WARN_ON(atomic_read(&eb->spinning_readers) == 0); atomic_dec(&eb->spinning_readers); } -static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) +static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { atomic_inc(&eb->read_locks); } -static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) +static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { atomic_dec(&eb->read_locks); } -static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) +static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { BUG_ON(!atomic_read(&eb->read_locks)); } -static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) +static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { eb->write_locks++; } -static void 
btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) +static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { eb->write_locks--; } -- cgit v1.2.3-59-g8ed1b From 31f6e769ce4145fdfaba44ad7c5b30ad21f58c11 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 24 Sep 2019 18:44:24 +0200 Subject: btrfs: make btrfs_assert_tree_locked static inline The function btrfs_assert_tree_locked is used outside of the locking code so it is exported, however we can make it static inline as it's fairly trivial. This is the only locking assertion used in release builds, inlining improves the text size by 174 bytes and reduces stack consumption in the callers. Reviewed-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/locking.c | 6 ------ fs/btrfs/locking.h | 10 +++++++++- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 409c5a865079..028513153ac4 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -67,11 +67,6 @@ static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) eb->write_locks--; } -void btrfs_assert_tree_locked(struct extent_buffer *eb) -{ - BUG_ON(!eb->write_locks); -} - #else static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { } static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { } @@ -81,7 +76,6 @@ static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { } static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { } static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { } static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { } -void btrfs_assert_tree_locked(struct extent_buffer *eb) { } static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { } static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { } #endif diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index b775a4207ed9..ab4020de25e7 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -6,6 +6,8 @@ #ifndef BTRFS_LOCKING_H #define BTRFS_LOCKING_H +#include "extent_io.h" + #define BTRFS_WRITE_LOCK 1 #define BTRFS_READ_LOCK 2 #define BTRFS_WRITE_LOCK_BLOCKING 3 @@ -19,11 +21,17 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb); void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb); void btrfs_set_lock_blocking_read(struct extent_buffer *eb); void btrfs_set_lock_blocking_write(struct extent_buffer *eb); -void btrfs_assert_tree_locked(struct extent_buffer *eb); int btrfs_try_tree_read_lock(struct extent_buffer *eb); int btrfs_try_tree_write_lock(struct extent_buffer *eb); int btrfs_tree_read_lock_atomic(struct extent_buffer *eb); +#ifdef CONFIG_BTRFS_DEBUG +static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { + BUG_ON(!eb->write_locks); +} +#else +static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { } +#endif static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) { -- cgit v1.2.3-59-g8ed1b From ed2b1d36a9d027f9b841be5bfc9d61011462d447 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 24 Sep 2019 19:17:17 +0200 Subject: btrfs: move btrfs_set_path_blocking to other locking functions The function belongs to the family of locking functions, so move it there. The 'noinline' keyword is dropped as it's now an exported function that does not need it.
Reviewed-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 25 ------------------------- fs/btrfs/locking.c | 26 ++++++++++++++++++++++++++ fs/btrfs/locking.h | 2 ++ 3 files changed, 28 insertions(+), 25 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 0231141de289..a55d55e5c913 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -56,31 +56,6 @@ struct btrfs_path *btrfs_alloc_path(void) return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); } -/* - * set all locked nodes in the path to blocking locks. This should - * be done before scheduling - */ -noinline void btrfs_set_path_blocking(struct btrfs_path *p) -{ - int i; - for (i = 0; i < BTRFS_MAX_LEVEL; i++) { - if (!p->nodes[i] || !p->locks[i]) - continue; - /* - * If we currently have a spinning reader or writer lock this - * will bump the count of blocking holders and drop the - * spinlock. - */ - if (p->locks[i] == BTRFS_READ_LOCK) { - btrfs_set_lock_blocking_read(p->nodes[i]); - p->locks[i] = BTRFS_READ_LOCK_BLOCKING; - } else if (p->locks[i] == BTRFS_WRITE_LOCK) { - btrfs_set_lock_blocking_write(p->nodes[i]); - p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING; - } - } -} - /* this also releases the path */ void btrfs_free_path(struct btrfs_path *p) { diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 028513153ac4..f58606887859 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -316,3 +316,29 @@ void btrfs_tree_unlock(struct extent_buffer *eb) write_unlock(&eb->lock); } } + +/* + * Set all locked nodes in the path to blocking locks. This should be done + * before scheduling + */ +void btrfs_set_path_blocking(struct btrfs_path *p) +{ + int i; + + for (i = 0; i < BTRFS_MAX_LEVEL; i++) { + if (!p->nodes[i] || !p->locks[i]) + continue; + /* + * If we currently have a spinning reader or writer lock this + * will bump the count of blocking holders and drop the + * spinlock. + */ + if (p->locks[i] == BTRFS_READ_LOCK) { + btrfs_set_lock_blocking_read(p->nodes[i]); + p->locks[i] = BTRFS_READ_LOCK_BLOCKING; + } else if (p->locks[i] == BTRFS_WRITE_LOCK) { + btrfs_set_lock_blocking_write(p->nodes[i]); + p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING; + } + } +} diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index ab4020de25e7..98c92222eaf0 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -33,6 +33,8 @@ static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { } #endif +void btrfs_set_path_blocking(struct btrfs_path *p); + static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) { if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING) -- cgit v1.2.3-59-g8ed1b From 1f95ec012cb4a3fabfef3efd9ba0b59e14ce48ce Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 24 Sep 2019 19:17:17 +0200 Subject: btrfs: move btrfs_unlock_up_safe to other locking functions The function belongs to the family of locking functions, so move it there. The 'noinline' keyword is dropped as it's now an exported function that does not need it. 
Reviewed-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 26 -------------------------- fs/btrfs/ctree.h | 2 -- fs/btrfs/delayed-inode.c | 1 + fs/btrfs/locking.c | 26 ++++++++++++++++++++++++++ fs/btrfs/locking.h | 1 + 5 files changed, 28 insertions(+), 28 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a55d55e5c913..f2f9cf1149a4 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2353,32 +2353,6 @@ static noinline void unlock_up(struct btrfs_path *path, int level, } } -/* - * This releases any locks held in the path starting at level and - * going all the way up to the root. - * - * btrfs_search_slot will keep the lock held on higher nodes in a few - * corner cases, such as COW of the block at slot zero in the node. This - * ignores those rules, and it should only be called when there are no - * more updates to be done higher up in the tree. - */ -noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level) -{ - int i; - - if (path->keep_locks) - return; - - for (i = level; i < BTRFS_MAX_LEVEL; i++) { - if (!path->nodes[i]) - continue; - if (!path->locks[i]) - continue; - btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); - path->locks[i] = 0; - } -} - /* * helper function for btrfs_search_slot. The goal is to find a block * in cache without setting the path to blocking. If we find the block diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 9939b552089b..668524097a3c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2568,8 +2568,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, void btrfs_release_path(struct btrfs_path *p); struct btrfs_path *btrfs_alloc_path(void); void btrfs_free_path(struct btrfs_path *p); -void btrfs_set_path_blocking(struct btrfs_path *p); -void btrfs_unlock_up_safe(struct btrfs_path *p, int level); int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int slot, int nr); diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 49ec3402886a..942cc2a59164 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -12,6 +12,7 @@ #include "transaction.h" #include "ctree.h" #include "qgroup.h" +#include "locking.h" #define BTRFS_DELAYED_WRITEBACK 512 #define BTRFS_DELAYED_BACKGROUND 128 diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index f58606887859..93146b495276 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -342,3 +342,29 @@ void btrfs_set_path_blocking(struct btrfs_path *p) } } } + +/* + * This releases any locks held in the path starting at level and going all the + * way up to the root. + * + * btrfs_search_slot will keep the lock held on higher nodes in a few corner + * cases, such as COW of the block at slot zero in the node. This ignores + * those rules, and it should only be called when there are no more updates to + * be done higher up in the tree. 
+ */ +void btrfs_unlock_up_safe(struct btrfs_path *path, int level) +{ + int i; + + if (path->keep_locks) + return; + + for (i = level; i < BTRFS_MAX_LEVEL; i++) { + if (!path->nodes[i]) + continue; + if (!path->locks[i]) + continue; + btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); + path->locks[i] = 0; + } +} diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index 98c92222eaf0..21a285883e89 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -34,6 +34,7 @@ static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { } #endif void btrfs_set_path_blocking(struct btrfs_path *p); +void btrfs_unlock_up_safe(struct btrfs_path *path, int level); static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) { -- cgit v1.2.3-59-g8ed1b From e62869be1ed7569bf12a5dc28c5b7ee5823f9cf3 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Wed, 25 Sep 2019 14:29:28 +0800 Subject: btrfs: balance: use term redundancy instead of integrity in message When balance reduces the number of copies of metadata, it reduces the redundancy, use the term redundancy instead of integrity. Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d8d7b1ee83ca..8d1555be03ad 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4036,7 +4036,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, int ret; u64 num_devices; unsigned seq; - bool reducing_integrity; + bool reducing_redundancy; int i; if (btrfs_fs_closing(fs_info) || @@ -4119,9 +4119,9 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && (fs_info->avail_metadata_alloc_bits & allowed) && !(bctl->meta.target & allowed))) - reducing_integrity = true; + reducing_redundancy = true; else - reducing_integrity = false; + reducing_redundancy = false; /* if we're not converting, the target field is uninitialized */ meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? @@ -4130,13 +4130,13 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, bctl->data.target : fs_info->avail_data_alloc_bits; } while (read_seqretry(&fs_info->profiles_lock, seq)); - if (reducing_integrity) { + if (reducing_redundancy) { if (bctl->flags & BTRFS_BALANCE_FORCE) { btrfs_info(fs_info, - "balance: force reducing metadata integrity"); + "balance: force reducing metadata redundancy"); } else { btrfs_err(fs_info, - "balance: reduces metadata integrity, use --force if you want this"); + "balance: reduces metadata redundancy, use --force if you want this"); ret = -EINVAL; goto out; } -- cgit v1.2.3-59-g8ed1b From 79c8264e443471eb2748c93680acce54d14d2225 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 1 Oct 2019 19:40:15 +0200 Subject: btrfs: add 64bit safe helper for power of two checks As is_power_of_two takes unsigned long, it's not safe on 32bit architectures, but we could pass any u64 value in seveal places. Add a separate helper and also an alias that better expresses the purpose for which the helper is used. 
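As a rough standalone illustration (not the kernel code itself) of why the unsigned long based is_power_of_2() is insufficient here: on 32-bit architectures a u64 value such as the single chunk profile bit 1ULL << 48 would be truncated to zero, while a u64-wide check, mirroring the helper added below, handles it correctly:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the helper added to misc.h, written as plain userspace C. */
static bool has_single_bit_set(uint64_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	/* Prints 1 even where unsigned long is only 32 bits wide. */
	printf("%d\n", has_single_bit_set(1ULL << 48));
	return 0;
}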
Signed-off-by: David Sterba --- fs/btrfs/misc.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h index 7d564924dfeb..72bab64ecf60 100644 --- a/fs/btrfs/misc.h +++ b/fs/btrfs/misc.h @@ -47,4 +47,15 @@ static inline u64 div_factor_fine(u64 num, int factor) return div_u64(num, 100); } +/* Copy of is_power_of_two that is 64bit safe */ +static inline bool is_power_of_two_u64(u64 n) +{ + return n != 0 && (n & (n - 1)) == 0; +} + +static inline bool has_single_bit_set(u64 n) +{ + return is_power_of_two_u64(n); +} + #endif -- cgit v1.2.3-59-g8ed1b From c1499166d10ae734b5ec5cc7982bd9b9ee7f9fe6 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 1 Oct 2019 19:44:42 +0200 Subject: btrfs: use has_single_bit_set for clarity Replace is_power_of_2 with the helper that is self-documenting and remove the open coded call in alloc_profile_is_valid. Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 15 ++++++++------- fs/btrfs/volumes.c | 7 +------ 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index f041ec5f415c..fa4848a87430 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -23,6 +23,7 @@ #include "disk-io.h" #include "compression.h" #include "volumes.h" +#include "misc.h" /* * Error message should follow the following format: @@ -632,7 +633,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf, return -EUCLEAN; } - if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) && + if (!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) && (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) { chunk_err(leaf, chunk, logical, "invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set", @@ -808,11 +809,11 @@ static int check_inode_item(struct extent_buffer *leaf, } /* - * S_IFMT is not bit mapped so we can't completely rely on is_power_of_2, - * but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG. - * Only needs to check BLK, LNK and SOCKS + * S_IFMT is not bit mapped so we can't completely rely on + * is_power_of_2/has_single_bit_set, but it can save us from checking + * FIFO/CHR/DIR/REG. Only needs to check BLK, LNK and SOCKS */ - if (!is_power_of_2(mode & S_IFMT)) { + if (!has_single_bit_set(mode & S_IFMT)) { if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) { inode_item_err(fs_info, leaf, slot, "invalid mode: has 0%o expect valid S_IF* bit(s)", @@ -1033,8 +1034,8 @@ static int check_extent_item(struct extent_buffer *leaf, btrfs_super_generation(fs_info->super_copy) + 1); return -EUCLEAN; } - if (!is_power_of_2(flags & (BTRFS_EXTENT_FLAG_DATA | - BTRFS_EXTENT_FLAG_TREE_BLOCK))) { + if (!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA | + BTRFS_EXTENT_FLAG_TREE_BLOCK))) { extent_err(leaf, slot, "invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx", flags, BTRFS_EXTENT_FLAG_DATA | diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8d1555be03ad..f7b45e4b7de4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3844,12 +3844,7 @@ static int alloc_profile_is_valid(u64 flags, int extended) if (flags == 0) return !extended; /* "0" is valid for usual profiles */ - /* true if exactly one bit set */ - /* - * Don't use is_power_of_2(unsigned long) because it won't work - * for the single profile (1ULL << 48) on 32-bit CPUs. 
- */ - return flags != 0 && (flags & (flags - 1)) == 0; + return has_single_bit_set(flags); } static inline int balance_need_close(struct btrfs_fs_info *fs_info) -- cgit v1.2.3-59-g8ed1b From 61c047b541b56f4ec78886e9d695e0a3bbcd3834 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 22 Aug 2019 15:24:59 +0800 Subject: btrfs: transaction: describe transaction states and transitions Add an overview of the basic btrfs transaction transitions, including the following states: - No transaction states - Transaction N [[TRANS_STATE_RUNNING]] - Transaction N [[TRANS_STATE_COMMIT_START]] - Transaction N [[TRANS_STATE_COMMIT_DOING]] - Transaction N [[TRANS_STATE_UNBLOCKED]] - Transaction N [[TRANS_STATE_COMPLETED]] For each state, the comment will include: - Basic explaination about current state - How to go next stage - What will happen if we call various start_transaction() functions - Relationship to transaction N+1 This doesn't provide tech details, but serves as a cheat sheet for reader to get into the code a little easier. Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/transaction.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 92ffe1df3fab..b60c9b871b9e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -24,6 +24,77 @@ #define BTRFS_ROOT_TRANS_TAG 0 +/* + * Transaction states and transitions + * + * No running transaction (fs tree blocks are not modified) + * | + * | To next stage: + * | Call start_transaction() variants. Except btrfs_join_transaction_nostart(). + * V + * Transaction N [[TRANS_STATE_RUNNING]] + * | + * | New trans handles can be attached to transaction N by calling all + * | start_transaction() variants. + * | + * | To next stage: + * | Call btrfs_commit_transaction() on any trans handle attached to + * | transaction N + * V + * Transaction N [[TRANS_STATE_COMMIT_START]] + * | + * | Will wait for previous running transaction to completely finish if there + * | is one + * | + * | Then one of the following happes: + * | - Wait for all other trans handle holders to release. + * | The btrfs_commit_transaction() caller will do the commit work. + * | - Wait for current transaction to be committed by others. + * | Other btrfs_commit_transaction() caller will do the commit work. + * | + * | At this stage, only btrfs_join_transaction*() variants can attach + * | to this running transaction. + * | All other variants will wait for current one to finish and attach to + * | transaction N+1. + * | + * | To next stage: + * | Caller is chosen to commit transaction N, and all other trans handle + * | haven been released. + * V + * Transaction N [[TRANS_STATE_COMMIT_DOING]] + * | + * | The heavy lifting transaction work is started. + * | From running delayed refs (modifying extent tree) to creating pending + * | snapshots, running qgroups. + * | In short, modify supporting trees to reflect modifications of subvolume + * | trees. + * | + * | At this stage, all start_transaction() calls will wait for this + * | transaction to finish and attach to transaction N+1. + * | + * | To next stage: + * | Until all supporting trees are updated. + * V + * Transaction N [[TRANS_STATE_UNBLOCKED]] + * | Transaction N+1 + * | All needed trees are modified, thus we only [[TRANS_STATE_RUNNING]] + * | need to write them back to disk and update | + * | super blocks. | + * | | + * | At this stage, new transaction is allowed to | + * | start. 
| + * | All new start_transaction() calls will be | + * | attached to transid N+1. | + * | | + * | To next stage: | + * | Until all tree blocks are super blocks are | + * | written to block devices | + * V | + * Transaction N [[TRANS_STATE_COMPLETED]] V + * All tree blocks and super blocks are written. Transaction N+1 + * This transaction is finished and all its [[TRANS_STATE_COMMIT_START]] + * data structures will be cleaned up. | Life goes on + */ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = { [TRANS_STATE_RUNNING] = 0U, [TRANS_STATE_BLOCKED] = __TRANS_START, -- cgit v1.2.3-59-g8ed1b From 3296bf562443a8ca35aaad959a76a49e9b412760 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 22 Aug 2019 15:25:00 +0800 Subject: btrfs: transaction: Cleanup unused TRANS_STATE_BLOCKED The state was introduced in commit 4a9d8bdee368 ("Btrfs: make the state of the transaction more readable"), then in commit 302167c50b32 ("btrfs: don't end the transaction for delayed refs in throttle") the state is completely removed. So we can just clean up the state since it's only compared but never set. Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/transaction.c | 15 +++------------ fs/btrfs/transaction.h | 1 - 3 files changed, 4 insertions(+), 14 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index bae334212ee2..00954c99d259 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1741,7 +1741,7 @@ static int transaction_kthread(void *arg) } now = ktime_get_seconds(); - if (cur->state < TRANS_STATE_BLOCKED && + if (cur->state < TRANS_STATE_COMMIT_START && !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) && (now < cur->start_time || now - cur->start_time < fs_info->commit_interval)) { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index b60c9b871b9e..54b8718054ce 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -97,7 +97,6 @@ */ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = { [TRANS_STATE_RUNNING] = 0U, - [TRANS_STATE_BLOCKED] = __TRANS_START, [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH), [TRANS_STATE_COMMIT_DOING] = (__TRANS_START | __TRANS_ATTACH | @@ -454,7 +453,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, static inline int is_transaction_blocked(struct btrfs_transaction *trans) { - return (trans->state >= TRANS_STATE_BLOCKED && + return (trans->state >= TRANS_STATE_COMMIT_START && trans->state < TRANS_STATE_UNBLOCKED && !trans->aborted); } @@ -641,7 +640,7 @@ again: INIT_LIST_HEAD(&h->new_bgs); smp_mb(); - if (cur_trans->state >= TRANS_STATE_BLOCKED && + if (cur_trans->state >= TRANS_STATE_COMMIT_START && may_wait_transaction(fs_info, type)) { current->journal_info = h; btrfs_commit_transaction(h); @@ -869,7 +868,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans) struct btrfs_transaction *cur_trans = trans->transaction; smp_mb(); - if (cur_trans->state >= TRANS_STATE_BLOCKED || + if (cur_trans->state >= TRANS_STATE_COMMIT_START || cur_trans->delayed_refs.flushing) return 1; @@ -902,7 +901,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *info = trans->fs_info; struct btrfs_transaction *cur_trans = trans->transaction; - int lock = (trans->type != TRANS_JOIN_NOLOCK); int err = 0; if (refcount_read(&trans->use_count) > 1) { @@ -918,13 +916,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, 
btrfs_trans_release_chunk_metadata(trans); - if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { - if (throttle) - return btrfs_commit_transaction(trans); - else - wake_up_process(info->transaction_kthread); - } - if (trans->type & __TRANS_FREEZABLE) sb_end_intwrite(info->sb); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index c51135d9b448..2ac89fb0d709 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -13,7 +13,6 @@ enum btrfs_trans_state { TRANS_STATE_RUNNING, - TRANS_STATE_BLOCKED, TRANS_STATE_COMMIT_START, TRANS_STATE_COMMIT_DOING, TRANS_STATE_UNBLOCKED, -- cgit v1.2.3-59-g8ed1b From 89cbf5f6b6c2a5f0ac7cb83c8b3fd97eadba0d11 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 30 Aug 2019 17:44:47 +0300 Subject: btrfs: Don't opencode btrfs_find_name_in_backref in backref_in_log Direct replacement, though note that the inside of the loop in btrfs_find_name_in_backref is organized in a slightly different way but is equvalent. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba [ add changelog ] Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 48 ++++++++++++------------------------------------ 1 file changed, 12 insertions(+), 36 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 8a6cc600bf18..1d7f22951ef2 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -945,54 +945,30 @@ static noinline int backref_in_log(struct btrfs_root *log, const char *name, int namelen) { struct btrfs_path *path; - struct btrfs_inode_ref *ref; - unsigned long ptr; - unsigned long ptr_end; - unsigned long name_ptr; - int found_name_len; - int item_size; int ret; - int match = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(NULL, log, key, path, 0, 0); - if (ret != 0) - goto out; - - ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); - - if (key->type == BTRFS_INODE_EXTREF_KEY) { - if (btrfs_find_name_in_ext_backref(path->nodes[0], - path->slots[0], - ref_objectid, - name, namelen)) - match = 1; - + if (ret != 0) { + ret = 0; goto out; } - item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); - ptr_end = ptr + item_size; - while (ptr < ptr_end) { - ref = (struct btrfs_inode_ref *)ptr; - found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref); - if (found_name_len == namelen) { - name_ptr = (unsigned long)(ref + 1); - ret = memcmp_extent_buffer(path->nodes[0], name, - name_ptr, namelen); - if (ret == 0) { - match = 1; - goto out; - } - } - ptr = (unsigned long)(ref + 1) + found_name_len; - } + if (key->type == BTRFS_INODE_EXTREF_KEY) + ret = !!btrfs_find_name_in_ext_backref(path->nodes[0], + path->slots[0], + ref_objectid, + name, namelen); + else + ret = !!btrfs_find_name_in_backref(path->nodes[0], + path->slots[0], + name, namelen); out: btrfs_free_path(path); - return match; + return ret; } static inline int __add_inode_ref(struct btrfs_trans_handle *trans, -- cgit v1.2.3-59-g8ed1b From d3316c8233bb05e0dd855d30aac347bb8ad76ee4 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 25 Sep 2019 14:03:03 +0300 Subject: btrfs: Properly handle backref_in_log retval This function can return a negative error value if btrfs_search_slot errors for whatever reason or if btrfs_alloc_path runs out of memory. This is currently problemattic because backref_in_log is treated by its callers as if it returns boolean. Fix this by adding proper error handling in callers. That also enables the function to return the direct error code from btrfs_search_slot. 
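A hedged sketch of the resulting calling convention (the caller below is illustrative; only backref_in_log() and its return values come from this patch): a negative value is an error such as -ENOMEM or a btrfs_search_slot() failure, 0 means the reference is not in the log, and 1 means it is:

static int example_caller(struct btrfs_root *log_root, struct btrfs_key *search_key,
			  u64 parent_objectid, const char *name, int name_len)
{
	int ret;

	ret = backref_in_log(log_root, search_key, parent_objectid, name, name_len);
	if (ret < 0)		/* allocation failure or btrfs_search_slot() error */
		return ret;
	if (ret == 0)		/* reference is not in the log tree */
		return -ENOENT;
	return 0;		/* ret == 1: reference found in the log */
}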
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 1d7f22951ef2..24fa6560655c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -952,7 +952,9 @@ static noinline int backref_in_log(struct btrfs_root *log, return -ENOMEM; ret = btrfs_search_slot(NULL, log, key, path, 0, 0); - if (ret != 0) { + if (ret < 0) { + goto out; + } else if (ret == 1) { ret = 0; goto out; } @@ -1026,10 +1028,13 @@ again: (unsigned long)(victim_ref + 1), victim_name_len); - if (!backref_in_log(log_root, &search_key, - parent_objectid, - victim_name, - victim_name_len)) { + ret = backref_in_log(log_root, &search_key, + parent_objectid, victim_name, + victim_name_len); + if (ret < 0) { + kfree(victim_name); + return ret; + } else if (!ret) { inc_nlink(&inode->vfs_inode); btrfs_release_path(path); @@ -1091,10 +1096,12 @@ again: search_key.offset = btrfs_extref_hash(parent_objectid, victim_name, victim_name_len); - ret = 0; - if (!backref_in_log(log_root, &search_key, - parent_objectid, victim_name, - victim_name_len)) { + ret = backref_in_log(log_root, &search_key, + parent_objectid, victim_name, + victim_name_len); + if (ret < 0) { + return ret; + } else if (!ret) { ret = -ENOENT; victim_parent = read_one_inode(root, parent_objectid); @@ -1869,16 +1876,19 @@ static bool name_in_log_ref(struct btrfs_root *log_root, const u64 dirid, const u64 ino) { struct btrfs_key search_key; + int ret; search_key.objectid = ino; search_key.type = BTRFS_INODE_REF_KEY; search_key.offset = dirid; - if (backref_in_log(log_root, &search_key, dirid, name, name_len)) + ret = backref_in_log(log_root, &search_key, dirid, name, name_len); + if (ret == 1) return true; search_key.type = BTRFS_INODE_EXTREF_KEY; search_key.offset = btrfs_extref_hash(dirid, name, name_len); - if (backref_in_log(log_root, &search_key, dirid, name, name_len)) + ret = backref_in_log(log_root, &search_key, dirid, name, name_len); + if (ret == 1) return true; return false; -- cgit v1.2.3-59-g8ed1b From 725af92a62511aee68216438192041135bf14238 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 30 Aug 2019 17:44:49 +0300 Subject: btrfs: Open-code name_in_log_ref in replay_one_name That function adds unnecessary indirection between backref_in_log and the caller. Furthermore it also "downgrades" backref_in_log's return value to a boolean, when in fact it could very well be an error. Rectify the situation by simply opencoding name_in_log_ref in replay_one_name and properly handling possible return codes from backref_in_log. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba [ update comment ] Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 54 +++++++++++++++++++++++++---------------------------- 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 24fa6560655c..fa35fb890bf3 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1867,33 +1867,6 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans, return ret; } -/* - * Return true if an inode reference exists in the log for the given name, - * inode and parent inode. 
- */ -static bool name_in_log_ref(struct btrfs_root *log_root, - const char *name, const int name_len, - const u64 dirid, const u64 ino) -{ - struct btrfs_key search_key; - int ret; - - search_key.objectid = ino; - search_key.type = BTRFS_INODE_REF_KEY; - search_key.offset = dirid; - ret = backref_in_log(log_root, &search_key, dirid, name, name_len); - if (ret == 1) - return true; - - search_key.type = BTRFS_INODE_EXTREF_KEY; - search_key.offset = btrfs_extref_hash(dirid, name, name_len); - ret = backref_in_log(log_root, &search_key, dirid, name, name_len); - if (ret == 1) - return true; - - return false; -} - /* * take a single entry in a log directory item and replay it into * the subvolume. @@ -2010,8 +1983,31 @@ out: return ret; insert: - if (name_in_log_ref(root->log_root, name, name_len, - key->objectid, log_key.objectid)) { + /* + * Check if the inode reference exists in the log for the given name, + * inode and parent inode + */ + found_key.objectid = log_key.objectid; + found_key.type = BTRFS_INODE_REF_KEY; + found_key.offset = key->objectid; + ret = backref_in_log(root->log_root, &found_key, 0, name, name_len); + if (ret < 0) { + goto out; + } else if (ret) { + /* The dentry will be added later. */ + ret = 0; + update_size = false; + goto out; + } + + found_key.objectid = log_key.objectid; + found_key.type = BTRFS_INODE_EXTREF_KEY; + found_key.offset = key->objectid; + ret = backref_in_log(root->log_root, &found_key, key->objectid, name, + name_len); + if (ret < 0) { + goto out; + } else if (ret) { /* The dentry will be added later. */ ret = 0; update_size = false; goto out; } -- cgit v1.2.3-59-g8ed1b From aa6c0df73e3b3fc771d8d8469eb81537f4b46a45 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Wed, 2 Oct 2019 18:30:48 +0800 Subject: btrfs: print process name and pid that calls device scanning It's very helpful to log the device scanner's process name when debugging the race condition between the systemd-udevd scan and the user-initiated device forget command. This patch adds the process name and pid to the scan message.
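With this change the scan message would look roughly like the following (all values here are placeholders, not real output):

	BTRFS: device label mydata devid 1 transid 42 /dev/sdb scanned by systemd-udevd (1234)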
Signed-off-by: Anand Jain Reviewed-by: David Sterba [ add pid to the message ] Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f7b45e4b7de4..139b73dcfbcc 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1005,11 +1005,15 @@ static noinline struct btrfs_device *device_list_add(const char *path, *new_device_added = true; if (disk_super->label[0]) - pr_info("BTRFS: device label %s devid %llu transid %llu %s\n", - disk_super->label, devid, found_transid, path); + pr_info( + "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n", + disk_super->label, devid, found_transid, path, + current->comm, task_pid_nr(current)); else - pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n", - disk_super->fsid, devid, found_transid, path); + pr_info( + "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n", + disk_super->fsid, devid, found_transid, path, + current->comm, task_pid_nr(current)); } else if (!device->name || strcmp(device->name->str, path)) { /* -- cgit v1.2.3-59-g8ed1b From baf320b9d531f1cfbf64c60dd155ff80a58b3796 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 26 Sep 2019 08:29:32 -0400 Subject: btrfs: use refcount_inc_not_zero in kill_all_nodes We hit the following warning while running down a different problem [ 6197.175850] ------------[ cut here ]------------ [ 6197.185082] refcount_t: underflow; use-after-free. [ 6197.194704] WARNING: CPU: 47 PID: 966 at lib/refcount.c:190 refcount_sub_and_test_checked+0x53/0x60 [ 6197.521792] Call Trace: [ 6197.526687] __btrfs_release_delayed_node+0x76/0x1c0 [ 6197.536615] btrfs_kill_all_delayed_nodes+0xec/0x130 [ 6197.546532] ? __btrfs_btree_balance_dirty+0x60/0x60 [ 6197.556482] btrfs_clean_one_deleted_snapshot+0x71/0xd0 [ 6197.566910] cleaner_kthread+0xfa/0x120 [ 6197.574573] kthread+0x111/0x130 [ 6197.581022] ? kthread_create_on_node+0x60/0x60 [ 6197.590086] ret_from_fork+0x1f/0x30 [ 6197.597228] ---[ end trace 424bb7ae00509f56 ]--- This is because the free side drops the ref without the lock, and then takes the lock if our refcount is 0. So you can have nodes on the tree that have a refcount of 0. Fix this by zero'ing out that element in our temporary array so we don't try to kill it again. 
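The underlying idiom is the standard refcount_inc_not_zero() lookup pattern, sketched here in generic form (hedged illustration; the names below are placeholders, the actual fix is in the hunk that follows): take a reference only under the same lock the release side uses, and treat a failed increment as "object is already dying, skip it":

	spin_lock(&lookup_lock);
	for (i = 0; i < n; i++) {
		/* refs may already be zero if the free side is mid-teardown */
		if (!refcount_inc_not_zero(&objs[i]->refs))
			objs[i] = NULL;	/* do not touch it again */
	}
	spin_unlock(&lookup_lock);

	for (i = 0; i < n; i++) {
		if (!objs[i])
			continue;
		do_work(objs[i]);	/* placeholder */
		put_obj(objs[i]);	/* placeholder: drops the reference taken above */
	}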
CC: stable@vger.kernel.org # 4.14+ Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ add comment ] Signed-off-by: David Sterba --- fs/btrfs/delayed-inode.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 942cc2a59164..d3e15e1d4a91 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1950,12 +1950,19 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) } inode_id = delayed_nodes[n - 1]->inode_id + 1; - - for (i = 0; i < n; i++) - refcount_inc(&delayed_nodes[i]->refs); + for (i = 0; i < n; i++) { + /* + * Don't increase refs in case the node is dead and + * about to be removed from the tree in the loop below + */ + if (!refcount_inc_not_zero(&delayed_nodes[i]->refs)) + delayed_nodes[i] = NULL; + } spin_unlock(&root->inode_lock); for (i = 0; i < n; i++) { + if (!delayed_nodes[i]) + continue; __btrfs_kill_delayed_node(delayed_nodes[i]); btrfs_release_delayed_node(delayed_nodes[i]); } -- cgit v1.2.3-59-g8ed1b From 4c66e0d4243bb8829f2c936e966030d967726e90 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 3 Oct 2019 19:09:35 +0200 Subject: btrfs: drop unused parameter is_new from btrfs_iget The parameter is now always set to NULL and could be dropped. The last user was get_default_root but that got reworked in 05dbe6837b60 ("Btrfs: unify subvol= and subvolid= mounting") and the parameter became unused. Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 5 ++--- fs/btrfs/export.c | 4 ++-- fs/btrfs/file.c | 2 +- fs/btrfs/free-space-cache.c | 2 +- fs/btrfs/inode.c | 24 ++++++++++++------------ fs/btrfs/ioctl.c | 2 +- fs/btrfs/props.c | 4 ++-- fs/btrfs/relocation.c | 4 ++-- fs/btrfs/send.c | 2 +- fs/btrfs/super.c | 2 +- fs/btrfs/tree-log.c | 14 ++++++-------- 11 files changed, 31 insertions(+), 34 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 668524097a3c..b23fb083a1d5 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2869,10 +2869,9 @@ int btrfs_drop_inode(struct inode *inode); int __init btrfs_init_cachep(void); void __cold btrfs_destroy_cachep(void); struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root, int *new, - struct btrfs_path *path); + struct btrfs_root *root, struct btrfs_path *path); struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root, int *was_new); + struct btrfs_root *root); struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, struct page *page, size_t pg_offset, u64 start, u64 end, int create); diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index ddf28ecf17f9..72e312cae69d 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -87,7 +87,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(sb, &key, root, NULL); + inode = btrfs_iget(sb, &key, root); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto fail; @@ -214,7 +214,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child) key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root, NULL)); + return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root)); fail: btrfs_free_path(path); return ERR_PTR(ret); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 25ce1b6dbda9..f9434fa3e387 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -296,7 +296,7 @@ static 
int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, key.objectid = defrag->ino; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); + inode = btrfs_iget(fs_info->sb, &key, inode_root); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto cleanup; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d54dcd0ab230..85cd874e7b48 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -78,7 +78,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root, * sure NOFS is set to keep us from deadlocking. */ nofs_flag = memalloc_nofs_save(); - inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path); + inode = btrfs_iget_path(fs_info->sb, &location, root, path); btrfs_release_path(path); memalloc_nofs_restore(nofs_flag); if (IS_ERR(inode)) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d3f7abf50c91..2ef5f542f58a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2672,7 +2672,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path, key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root, NULL); + inode = btrfs_iget(fs_info->sb, &key, root); if (IS_ERR(inode)) { srcu_read_unlock(&fs_info->subvol_srcu, index); return 0; @@ -3523,7 +3523,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) found_key.objectid = found_key.offset; found_key.type = BTRFS_INODE_ITEM_KEY; found_key.offset = 0; - inode = btrfs_iget(fs_info->sb, &found_key, root, NULL); + inode = btrfs_iget(fs_info->sb, &found_key, root); ret = PTR_ERR_OR_ZERO(inode); if (ret && ret != -ENOENT) goto out; @@ -5742,12 +5742,14 @@ static struct inode *btrfs_iget_locked(struct super_block *s, return inode; } -/* Get an inode object given its location and corresponding root. - * Returns in *is_new if the inode was read from disk +/* + * Get an inode object given its location and corresponding root. + * Path can be preallocated to prevent recursing back to iget through + * allocator. NULL is also valid but may require an additional allocation + * later. 
*/ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root, int *new, - struct btrfs_path *path) + struct btrfs_root *root, struct btrfs_path *path) { struct inode *inode; @@ -5762,8 +5764,6 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, if (!ret) { inode_tree_add(inode); unlock_new_inode(inode); - if (new) - *new = 1; } else { iget_failed(inode); /* @@ -5781,9 +5781,9 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, } struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root, int *new) + struct btrfs_root *root) { - return btrfs_iget_path(s, location, root, new, NULL); + return btrfs_iget_path(s, location, root, NULL); } static struct inode *new_simple_dir(struct super_block *s, @@ -5849,7 +5849,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) return ERR_PTR(ret); if (location.type == BTRFS_INODE_ITEM_KEY) { - inode = btrfs_iget(dir->i_sb, &location, root, NULL); + inode = btrfs_iget(dir->i_sb, &location, root); if (IS_ERR(inode)) return inode; @@ -5874,7 +5874,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) else inode = new_simple_dir(dir->i_sb, &location, sub_root); } else { - inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); + inode = btrfs_iget(dir->i_sb, &location, sub_root); } srcu_read_unlock(&fs_info->subvol_srcu, index); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 23272d9154f3..589b95eb2b80 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2462,7 +2462,7 @@ static int btrfs_search_path_in_tree_user(struct inode *inode, goto out; } - temp_inode = btrfs_iget(sb, &key2, root, NULL); + temp_inode = btrfs_iget(sb, &key2, root); if (IS_ERR(temp_inode)) { ret = PTR_ERR(temp_inode); goto out; diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c index 1e664e0b59b8..aac596300c89 100644 --- a/fs/btrfs/props.c +++ b/fs/btrfs/props.c @@ -416,11 +416,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans, key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - parent_inode = btrfs_iget(sb, &key, parent_root, NULL); + parent_inode = btrfs_iget(sb, &key, parent_root); if (IS_ERR(parent_inode)) return PTR_ERR(parent_inode); - child_inode = btrfs_iget(sb, &key, root, NULL); + child_inode = btrfs_iget(sb, &key, root); if (IS_ERR(child_inode)) { iput(parent_inode); return PTR_ERR(child_inode); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 5cd42b66818c..df195e2bd45f 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3560,7 +3560,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root, NULL); + inode = btrfs_iget(fs_info->sb, &key, root); if (IS_ERR(inode)) return -ENOENT; @@ -4246,7 +4246,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, key.objectid = objectid; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root, NULL); + inode = btrfs_iget(fs_info->sb, &key, root); BUG_ON(IS_ERR(inode)); BTRFS_I(inode)->index_cnt = group->key.objectid; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 123ac54af071..27e92594a81b 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -4779,7 +4779,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = 
btrfs_iget(fs_info->sb, &key, root, NULL); + inode = btrfs_iget(fs_info->sb, &key, root); if (IS_ERR(inode)) return PTR_ERR(inode); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 843015b9a11e..c7d78ac64b83 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1219,7 +1219,7 @@ static int btrfs_fill_super(struct super_block *sb, key.objectid = BTRFS_FIRST_FREE_OBJECTID; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL); + inode = btrfs_iget(sb, &key, fs_info->fs_root); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto fail_close; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index fa35fb890bf3..30a17143448d 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -559,7 +559,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root, key.objectid = objectid; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); + inode = btrfs_iget(root->fs_info->sb, &key, root); if (IS_ERR(inode)) inode = NULL; return inode; @@ -4965,7 +4965,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, key.objectid = ino; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root, NULL); + inode = btrfs_iget(fs_info->sb, &key, root); /* * If the other inode that had a conflicting dir entry was * deleted in the current transaction, we need to log its parent @@ -4975,8 +4975,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, ret = PTR_ERR(inode); if (ret == -ENOENT) { key.objectid = parent; - inode = btrfs_iget(fs_info->sb, &key, root, - NULL); + inode = btrfs_iget(fs_info->sb, &key, root); if (IS_ERR(inode)) { ret = PTR_ERR(inode); } else { @@ -5681,7 +5680,7 @@ process_leaf: continue; btrfs_release_path(path); - di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL); + di_inode = btrfs_iget(fs_info->sb, &di_key, root); if (IS_ERR(di_inode)) { ret = PTR_ERR(di_inode); goto next_dir_inode; @@ -5807,8 +5806,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, cur_offset = item_size; } - dir_inode = btrfs_iget(fs_info->sb, &inode_key, - root, NULL); + dir_inode = btrfs_iget(fs_info->sb, &inode_key, root); /* * If the parent inode was deleted, return an error to * fallback to a transaction commit. This is to prevent @@ -5882,7 +5880,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans, search_key.objectid = found_key.offset; search_key.type = BTRFS_INODE_ITEM_KEY; search_key.offset = 0; - inode = btrfs_iget(fs_info->sb, &search_key, root, NULL); + inode = btrfs_iget(fs_info->sb, &search_key, root); if (IS_ERR(inode)) return PTR_ERR(inode); -- cgit v1.2.3-59-g8ed1b From b105e92755b448efd4800e3415dfeae5ad8054e9 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 1 Oct 2019 19:57:35 +0200 Subject: btrfs: add __cold attribute to more functions The attribute can mark functions supposed to be called rarely if at all and the text can be moved to sections far from the other code. The attribute has been added to several functions already, this patch is based on hints given by gcc -Wsuggest-attribute=cold. The net effect of this patch is decrease of btrfs.ko by 1000-1300, depending on the config options. 
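The annotation itself is the plain GCC cold hint, roughly #define __cold __attribute__((__cold__)); as a sketch on a made-up function it reads:

    __cold void report_unrecoverable_error(int errno)
    {
            /* rarely executed; the compiler may move this body out of the
             * hot .text area and stop optimizing it for speed */
    }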
Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 4 ++-- fs/btrfs/disk-io.h | 4 ++-- fs/btrfs/super.c | 2 +- fs/btrfs/volumes.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 00954c99d259..307861d0a040 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2577,7 +2577,7 @@ out: return ret; } -int open_ctree(struct super_block *sb, +int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, char *options) { @@ -3962,7 +3962,7 @@ int btrfs_commit_super(struct btrfs_fs_info *fs_info) return btrfs_commit_transaction(trans); } -void close_ctree(struct btrfs_fs_info *fs_info) +void __cold close_ctree(struct btrfs_fs_info *fs_info) { int ret; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index a6958103d87e..76f123ebb292 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -49,10 +49,10 @@ struct extent_buffer *btrfs_find_create_tree_block( struct btrfs_fs_info *fs_info, u64 bytenr); void btrfs_clean_tree_block(struct extent_buffer *buf); -int open_ctree(struct super_block *sb, +int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, char *options); -void close_ctree(struct btrfs_fs_info *fs_info); +void __cold close_ctree(struct btrfs_fs_info *fs_info); int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors); struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index c7d78ac64b83..9c0c53e7d0fc 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -187,7 +187,7 @@ static struct ratelimit_state printk_limits[] = { RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100), }; -void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) +void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) { char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0"; struct va_format vaf; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 139b73dcfbcc..173427731a69 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2052,7 +2052,7 @@ static struct btrfs_device * btrfs_find_next_active_device( * where this function called, there should be always be another device (or * this_dev) which is active. */ -void btrfs_assign_next_active_device(struct btrfs_device *device, +void __cold btrfs_assign_next_active_device(struct btrfs_device *device, struct btrfs_device *this_dev) { struct btrfs_fs_info *fs_info = device->fs_info; -- cgit v1.2.3-59-g8ed1b From 4143cb8b6f00910e73a7503fd922211b9f08cf48 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 1 Oct 2019 19:57:37 +0200 Subject: btrfs: add const function attribute For some reason the attribute is called __attribute_const__ and not __const, marks functions that have no observable effects on program state, IOW not reading pointers, just the arguments and calculating a value. Allows the compiler to do some optimizations, based on -Wsuggest-attribute=const . The effects are rather small, though, about 60 bytes decrese of btrfs.ko. 
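A small sketch of what the attribute promises, using the declaration touched by this patch; the duplicated call is only an illustration of what the compiler may now fold:

    const char * __attribute_const__ btrfs_decode_error(int errno);

    static void example(void)
    {
            /* the result depends only on the argument, so both calls may
             * be evaluated just once */
            pr_warn("%s / %s", btrfs_decode_error(-EIO), btrfs_decode_error(-EIO));
    }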
Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 +- fs/btrfs/super.c | 2 +- fs/btrfs/volumes.c | 2 +- fs/btrfs/volumes.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b23fb083a1d5..8d7ac1cd49b4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3141,7 +3141,7 @@ __cold void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function, unsigned int line, int errno, const char *fmt, ...); -const char *btrfs_decode_error(int errno); +const char * __attribute_const__ btrfs_decode_error(int errno); __cold void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 9c0c53e7d0fc..144933c9b507 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -66,7 +66,7 @@ static struct file_system_type btrfs_root_fs_type; static int btrfs_remount(struct super_block *sb, int *flags, char *data); -const char *btrfs_decode_error(int errno) +const char * __attribute_const__ btrfs_decode_error(int errno) { char *errstr = "unknown"; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 173427731a69..d09371b0cdf9 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -297,7 +297,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, DEFINE_MUTEX(uuid_mutex); static LIST_HEAD(fs_uuids); -struct list_head *btrfs_get_fs_uuids(void) +struct list_head * __attribute_const__ btrfs_get_fs_uuids(void) { return &fs_uuids; } diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index a7da1f3e3627..0ae0677a8d86 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -571,7 +571,7 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags) void btrfs_commit_device_sizes(struct btrfs_transaction *trans); -struct list_head *btrfs_get_fs_uuids(void); +struct list_head * __attribute_const__ btrfs_get_fs_uuids(void); void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info); void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info); bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, -- cgit v1.2.3-59-g8ed1b From e1f60a6580c04d0d2492bb6034e968b8c29c78cf Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 1 Oct 2019 19:57:39 +0200 Subject: btrfs: add __pure attribute to functions The attribute is more relaxed than const and the functions could dereference pointers, as long as the observable state is not changed. We do have such functions, based on -Wsuggest-attribute=pure . The visible effects of this patch are negligible, there are differences in the assembly but hard to summarize. 
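Compared to const, a pure function may read memory through its pointer arguments but must not change observable state; sketched with one of the declarations from this patch:

    int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1,
                                   const struct btrfs_key *k2);

    /* if neither key changes between two calls, the compiler is free to
     * reuse the first result instead of repeating the comparison */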
Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/async-thread.c | 6 ++---- fs/btrfs/async-thread.h | 4 ++-- fs/btrfs/ctree.c | 2 +- fs/btrfs/ctree.h | 4 ++-- fs/btrfs/dev-replace.c | 2 +- fs/btrfs/dev-replace.h | 2 +- fs/btrfs/ioctl.c | 2 +- fs/btrfs/space-info.c | 2 +- fs/btrfs/space-info.h | 2 +- 9 files changed, 12 insertions(+), 14 deletions(-) diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index b97ae1b03417..1d32a07bb2d1 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -53,14 +53,12 @@ struct btrfs_workqueue { struct __btrfs_workqueue *high; }; -struct btrfs_fs_info * -btrfs_workqueue_owner(const struct __btrfs_workqueue *wq) +struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq) { return wq->fs_info; } -struct btrfs_fs_info * -btrfs_work_owner(const struct btrfs_work *work) +struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) { return work->wq->fs_info; } diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index c5bf2b117c05..a4434301d84d 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -41,8 +41,8 @@ void btrfs_queue_work(struct btrfs_workqueue *wq, void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max); void btrfs_set_work_high_priority(struct btrfs_work *work); -struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work); -struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); +struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work); +struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq); #endif diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index f2f9cf1149a4..3a4d8e27e565 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1538,7 +1538,7 @@ static int comp_keys(const struct btrfs_disk_key *disk, /* * same as comp_keys only with two btrfs_key's */ -int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) +int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) { if (k1->objectid > k2->objectid) return 1; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8d7ac1cd49b4..d486703009fa 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2508,7 +2508,7 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, int level, int *slot); -int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2); +int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2); int btrfs_previous_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid, int type); @@ -2907,7 +2907,7 @@ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); int btrfs_ioctl_get_supported_features(void __user *arg); void btrfs_sync_inode_flags_to_i_flags(struct inode *inode); -int btrfs_is_empty_uuid(u8 *uuid); +int __pure btrfs_is_empty_uuid(u8 *uuid); int btrfs_defrag_file(struct inode *inode, struct file *file, struct btrfs_ioctl_defrag_range_args *range, u64 newer_than, unsigned long max_pages); diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 48890826b5e6..f639dde2a679 100644 --- a/fs/btrfs/dev-replace.c 
+++ b/fs/btrfs/dev-replace.c @@ -986,7 +986,7 @@ static int btrfs_dev_replace_kthread(void *data) return 0; } -int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace) +int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace) { if (!dev_replace->is_valid) return 0; diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h index 78c5d8f1adda..60b70dacc299 100644 --- a/fs/btrfs/dev-replace.h +++ b/fs/btrfs/dev-replace.h @@ -17,6 +17,6 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info); void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info); int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info); -int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace); +int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace); #endif diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 589b95eb2b80..d7a1bd74bb71 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -541,7 +541,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) return 0; } -int btrfs_is_empty_uuid(u8 *uuid) +int __pure btrfs_is_empty_uuid(u8 *uuid) { int i; diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index e8a4b0ebe97f..7539696b4cba 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -10,7 +10,7 @@ #include "transaction.h" #include "block-group.h" -u64 btrfs_space_info_used(struct btrfs_space_info *s_info, +u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, bool may_use_included) { ASSERT(s_info); diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index 8867e84aa33d..2d8c811a9792 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -116,7 +116,7 @@ void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags, struct btrfs_space_info **space_info); struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, u64 flags); -u64 btrfs_space_info_used(struct btrfs_space_info *s_info, +u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, bool may_use_included); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, -- cgit v1.2.3-59-g8ed1b From 08635bae0b4ceb08fe4c156a11c83baec397d36d Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 10 Jul 2019 12:28:14 -0700 Subject: Btrfs: stop using btrfs_schedule_bio() btrfs_schedule_bio() hands IO off to a helper thread to do the actual submit_bio() call. This has been used to make sure async crc and compression helpers don't get stuck on IO submission. To maintain good performance, over time the IO submission threads duplicated some IO scheduler characteristics such as high and low priority IOs and they also made some ugly assumptions about request allocation batch sizes. All of this cost at least one extra context switch during IO submission, and doesn't fit well with the modern blkmq IO stack. So, this commit stops using btrfs_schedule_bio(). We may need to adjust the number of async helper threads for crcs and compression, but long term it's a better path. 
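With the hand-off gone, per-stripe submission reduces to a direct call into the block layer from the submitting context; a condensed sketch of the resulting path, details elided:

    static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
                                  u64 physical, int dev_nr)
    {
            /* ... map the bio to the target device and physical sector ... */
            btrfsic_submit_bio(bio);        /* no queueing to submit_workers */
    }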
Reviewed-by: Josef Bacik Reviewed-by: Nikolay Borisov Signed-off-by: Chris Mason Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 8 ++++---- fs/btrfs/disk-io.c | 7 +++---- fs/btrfs/inode.c | 6 +++--- fs/btrfs/volumes.c | 55 ++++---------------------------------------------- fs/btrfs/volumes.h | 2 +- 5 files changed, 15 insertions(+), 63 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index b05b361e2062..ae7d93bfba97 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -378,7 +378,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, BUG_ON(ret); /* -ENOMEM */ } - ret = btrfs_map_bio(fs_info, bio, 0, 1); + ret = btrfs_map_bio(fs_info, bio, 0); if (ret) { bio->bi_status = ret; bio_endio(bio); @@ -409,7 +409,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, BUG_ON(ret); /* -ENOMEM */ } - ret = btrfs_map_bio(fs_info, bio, 0, 1); + ret = btrfs_map_bio(fs_info, bio, 0); if (ret) { bio->bi_status = ret; bio_endio(bio); @@ -668,7 +668,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, fs_info->sectorsize); sums += csum_size * nr_sectors; - ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, comp_bio, mirror_num); if (ret) { comp_bio->bi_status = ret; bio_endio(comp_bio); @@ -693,7 +693,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, BUG_ON(ret); /* -ENOMEM */ } - ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, comp_bio, mirror_num); if (ret) { comp_bio->bi_status = ret; bio_endio(comp_bio); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 307861d0a040..bad83867c519 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -791,8 +791,7 @@ static void run_one_async_done(struct btrfs_work *work) return; } - ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, - async->mirror_num, 1); + ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num); if (ret) { async->bio->bi_status = ret; bio_endio(async->bio); @@ -892,12 +891,12 @@ static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio, BTRFS_WQ_ENDIO_METADATA); if (ret) goto out_w_error; - ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, bio, mirror_num); } else if (!async) { ret = btree_csum_one_bio(bio); if (ret) goto out_w_error; - ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, bio, mirror_num); } else { /* * kthread helpers are used to submit writes so that diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2ef5f542f58a..42d1be32b2d9 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2108,7 +2108,7 @@ static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, } mapit: - ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, bio, mirror_num); out: if (ret) { @@ -7850,7 +7850,7 @@ static inline blk_status_t submit_dio_repair_bio(struct inode *inode, if (ret) return ret; - ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, bio, mirror_num); return ret; } @@ -8376,7 +8376,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio, goto err; } map: - ret = btrfs_map_bio(fs_info, bio, 0, 0); + ret = btrfs_map_bio(fs_info, bio, 0); err: return ret; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d09371b0cdf9..eb92b4bdbc64 100644 --- a/fs/btrfs/volumes.c +++ 
b/fs/btrfs/volumes.c @@ -6415,52 +6415,8 @@ static void btrfs_end_bio(struct bio *bio) } } -/* - * see run_scheduled_bios for a description of why bios are collected for - * async submit. - * - * This will add one bio to the pending list for a device and make sure - * the work struct is scheduled. - */ -static noinline void btrfs_schedule_bio(struct btrfs_device *device, - struct bio *bio) -{ - struct btrfs_fs_info *fs_info = device->fs_info; - int should_queue = 1; - struct btrfs_pending_bios *pending_bios; - - /* don't bother with additional async steps for reads, right now */ - if (bio_op(bio) == REQ_OP_READ) { - btrfsic_submit_bio(bio); - return; - } - - WARN_ON(bio->bi_next); - bio->bi_next = NULL; - - spin_lock(&device->io_lock); - if (op_is_sync(bio->bi_opf)) - pending_bios = &device->pending_sync_bios; - else - pending_bios = &device->pending_bios; - - if (pending_bios->tail) - pending_bios->tail->bi_next = bio; - - pending_bios->tail = bio; - if (!pending_bios->head) - pending_bios->head = bio; - if (device->running_pending) - should_queue = 0; - - spin_unlock(&device->io_lock); - - if (should_queue) - btrfs_queue_work(fs_info->submit_workers, &device->work); -} - static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, - u64 physical, int dev_nr, int async) + u64 physical, int dev_nr) { struct btrfs_device *dev = bbio->stripes[dev_nr].dev; struct btrfs_fs_info *fs_info = bbio->fs_info; @@ -6478,10 +6434,7 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, btrfs_bio_counter_inc_noblocked(fs_info); - if (async) - btrfs_schedule_bio(dev, bio); - else - btrfsic_submit_bio(bio); + btrfsic_submit_bio(bio); } static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) @@ -6502,7 +6455,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) } blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, - int mirror_num, int async_submit) + int mirror_num) { struct btrfs_device *dev; struct bio *first_bio = bio; @@ -6571,7 +6524,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, bio = first_bio; submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, - dev_nr, async_submit); + dev_nr); } btrfs_bio_counter_dec(fs_info); return BLK_STS_OK; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 0ae0677a8d86..bc2bbaa485b7 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -436,7 +436,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info); int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type); void btrfs_mapping_tree_free(struct extent_map_tree *tree); blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, - int mirror_num, int async_submit); + int mirror_num); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder); struct btrfs_device *btrfs_scan_one_device(const char *path, -- cgit v1.2.3-59-g8ed1b From ba8a9d07954397f0645cf62bcc1ef536e8e7ba24 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 10 Jul 2019 12:28:15 -0700 Subject: Btrfs: delete the entire async bio submission framework Now that we're not using btrfs_schedule_bio() anymore, delete all the code that supported it. 
Reviewed-by: Josef Bacik Reviewed-by: Nikolay Borisov Signed-off-by: Chris Mason Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 - fs/btrfs/disk-io.c | 13 +--- fs/btrfs/super.c | 1 - fs/btrfs/volumes.c | 210 ----------------------------------------------------- fs/btrfs/volumes.h | 11 --- 5 files changed, 1 insertion(+), 235 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d486703009fa..d5c9fbe32c0c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -723,7 +723,6 @@ struct btrfs_fs_info { struct btrfs_workqueue *endio_meta_write_workers; struct btrfs_workqueue *endio_write_workers; struct btrfs_workqueue *endio_freespace_worker; - struct btrfs_workqueue *submit_workers; struct btrfs_workqueue *caching_workers; struct btrfs_workqueue *readahead_workers; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index bad83867c519..18f80b31f05f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1989,7 +1989,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->rmw_workers); btrfs_destroy_workqueue(fs_info->endio_write_workers); btrfs_destroy_workqueue(fs_info->endio_freespace_worker); - btrfs_destroy_workqueue(fs_info->submit_workers); btrfs_destroy_workqueue(fs_info->delayed_workers); btrfs_destroy_workqueue(fs_info->caching_workers); btrfs_destroy_workqueue(fs_info->readahead_workers); @@ -2154,16 +2153,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, fs_info->caching_workers = btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0); - /* - * a higher idle thresh on the submit workers makes it much more - * likely that bios will be send down in a sane order to the - * devices - */ - fs_info->submit_workers = - btrfs_alloc_workqueue(fs_info, "submit", flags, - min_t(u64, fs_devices->num_devices, - max_active), 64); - fs_info->fixup_workers = btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0); @@ -2202,7 +2191,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); if (!(fs_info->workers && fs_info->delalloc_workers && - fs_info->submit_workers && fs_info->flush_workers && + fs_info->flush_workers && fs_info->endio_workers && fs_info->endio_meta_workers && fs_info->endio_meta_write_workers && fs_info->endio_repair_workers && diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 144933c9b507..d5d15a19f51d 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1669,7 +1669,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info, btrfs_workqueue_set_max(fs_info->workers, new_pool_size); btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size); - btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index eb92b4bdbc64..f03ad5375b15 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -397,8 +397,6 @@ static struct btrfs_device *__alloc_device(void) INIT_LIST_HEAD(&dev->dev_alloc_list); INIT_LIST_HEAD(&dev->post_commit_list); - spin_lock_init(&dev->io_lock); - atomic_set(&dev->reada_in_flight, 0); atomic_set(&dev->dev_stats_ccnt, 0); btrfs_device_data_ordered_init(dev); @@ -501,212 +499,6 @@ error: return ret; } -static void requeue_list(struct btrfs_pending_bios *pending_bios, - struct bio 
*head, struct bio *tail) -{ - - struct bio *old_head; - - old_head = pending_bios->head; - pending_bios->head = head; - if (pending_bios->tail) - tail->bi_next = old_head; - else - pending_bios->tail = tail; -} - -/* - * we try to collect pending bios for a device so we don't get a large - * number of procs sending bios down to the same device. This greatly - * improves the schedulers ability to collect and merge the bios. - * - * But, it also turns into a long list of bios to process and that is sure - * to eventually make the worker thread block. The solution here is to - * make some progress and then put this work struct back at the end of - * the list if the block device is congested. This way, multiple devices - * can make progress from a single worker thread. - */ -static noinline void run_scheduled_bios(struct btrfs_device *device) -{ - struct btrfs_fs_info *fs_info = device->fs_info; - struct bio *pending; - struct backing_dev_info *bdi; - struct btrfs_pending_bios *pending_bios; - struct bio *tail; - struct bio *cur; - int again = 0; - unsigned long num_run; - unsigned long batch_run = 0; - unsigned long last_waited = 0; - int force_reg = 0; - int sync_pending = 0; - struct blk_plug plug; - - /* - * this function runs all the bios we've collected for - * a particular device. We don't want to wander off to - * another device without first sending all of these down. - * So, setup a plug here and finish it off before we return - */ - blk_start_plug(&plug); - - bdi = device->bdev->bd_bdi; - -loop: - spin_lock(&device->io_lock); - -loop_lock: - num_run = 0; - - /* take all the bios off the list at once and process them - * later on (without the lock held). But, remember the - * tail and other pointers so the bios can be properly reinserted - * into the list if we hit congestion - */ - if (!force_reg && device->pending_sync_bios.head) { - pending_bios = &device->pending_sync_bios; - force_reg = 1; - } else { - pending_bios = &device->pending_bios; - force_reg = 0; - } - - pending = pending_bios->head; - tail = pending_bios->tail; - WARN_ON(pending && !tail); - - /* - * if pending was null this time around, no bios need processing - * at all and we can stop. Otherwise it'll loop back up again - * and do an additional check so no bios are missed. - * - * device->running_pending is used to synchronize with the - * schedule_bio code. 
- */ - if (device->pending_sync_bios.head == NULL && - device->pending_bios.head == NULL) { - again = 0; - device->running_pending = 0; - } else { - again = 1; - device->running_pending = 1; - } - - pending_bios->head = NULL; - pending_bios->tail = NULL; - - spin_unlock(&device->io_lock); - - while (pending) { - - rmb(); - /* we want to work on both lists, but do more bios on the - * sync list than the regular list - */ - if ((num_run > 32 && - pending_bios != &device->pending_sync_bios && - device->pending_sync_bios.head) || - (num_run > 64 && pending_bios == &device->pending_sync_bios && - device->pending_bios.head)) { - spin_lock(&device->io_lock); - requeue_list(pending_bios, pending, tail); - goto loop_lock; - } - - cur = pending; - pending = pending->bi_next; - cur->bi_next = NULL; - - BUG_ON(atomic_read(&cur->__bi_cnt) == 0); - - /* - * if we're doing the sync list, record that our - * plug has some sync requests on it - * - * If we're doing the regular list and there are - * sync requests sitting around, unplug before - * we add more - */ - if (pending_bios == &device->pending_sync_bios) { - sync_pending = 1; - } else if (sync_pending) { - blk_finish_plug(&plug); - blk_start_plug(&plug); - sync_pending = 0; - } - - btrfsic_submit_bio(cur); - num_run++; - batch_run++; - - cond_resched(); - - /* - * we made progress, there is more work to do and the bdi - * is now congested. Back off and let other work structs - * run instead - */ - if (pending && bdi_write_congested(bdi) && batch_run > 8 && - fs_info->fs_devices->open_devices > 1) { - struct io_context *ioc; - - ioc = current->io_context; - - /* - * the main goal here is that we don't want to - * block if we're going to be able to submit - * more requests without blocking. - * - * This code does two great things, it pokes into - * the elevator code from a filesystem _and_ - * it makes assumptions about how batching works. - */ - if (ioc && ioc->nr_batch_requests > 0 && - time_before(jiffies, ioc->last_waited + HZ/50UL) && - (last_waited == 0 || - ioc->last_waited == last_waited)) { - /* - * we want to go through our batch of - * requests and stop. 
So, we copy out - * the ioc->last_waited time and test - * against it before looping - */ - last_waited = ioc->last_waited; - cond_resched(); - continue; - } - spin_lock(&device->io_lock); - requeue_list(pending_bios, pending, tail); - device->running_pending = 1; - - spin_unlock(&device->io_lock); - btrfs_queue_work(fs_info->submit_workers, - &device->work); - goto done; - } - } - - cond_resched(); - if (again) - goto loop; - - spin_lock(&device->io_lock); - if (device->pending_bios.head || device->pending_sync_bios.head) - goto loop_lock; - spin_unlock(&device->io_lock); - -done: - blk_finish_plug(&plug); -} - -static void pending_bios_fn(struct btrfs_work *work) -{ - struct btrfs_device *device; - - device = container_of(work, struct btrfs_device, work); - run_scheduled_bios(device); -} - static bool device_path_matched(const char *path, struct btrfs_device *device) { int found; @@ -6628,8 +6420,6 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, else generate_random_uuid(dev->uuid); - btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL); - return dev; } diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index bc2bbaa485b7..9b5fba4d6f6b 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -18,10 +18,6 @@ extern struct mutex uuid_mutex; #define BTRFS_STRIPE_LEN SZ_64K struct buffer_head; -struct btrfs_pending_bios { - struct bio *head; - struct bio *tail; -}; struct btrfs_io_geometry { /* remaining bytes before crossing a stripe */ @@ -68,13 +64,6 @@ struct btrfs_device { u64 generation; - spinlock_t io_lock ____cacheline_aligned; - int running_pending; - /* regular prio bios */ - struct btrfs_pending_bios pending_bios; - /* sync bios */ - struct btrfs_pending_bios pending_sync_bios; - struct block_device *bdev; /* the mode sent to blkdev_get */ -- cgit v1.2.3-59-g8ed1b From 1d53c9e6723022b12e4a5ed4b141f67c834b7f6f Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 10 Jul 2019 12:28:16 -0700 Subject: Btrfs: only associate the locked page with one async_chunk struct The btrfs writepages function collects a large range of pages flagged for delayed allocation, and then sends them down through the COW code for processing. When compression is on, we allocate one async_chunk structure for every 512K, and then run those pages through the compression code for IO submission. writepages starts all of this off with a single page, locked by the original call to extent_write_cache_pages(), and it's important to keep track of this page because it has already been through clear_page_dirty_for_io(). The btrfs async_chunk struct has a pointer to the locked_page, and when we're redirtying the page because compression had to fallback to uncompressed IO, we use page->index to decide if a given async_chunk struct really owns that page. But, this is racey. If a given delalloc range is broken up into two async_chunks (chunkA and chunkB), we can end up with something like this: compress_file_range(chunkA) submit_compress_extents(chunkA) submit compressed bios(chunkA) put_page(locked_page) compress_file_range(chunkB) ... Or: async_cow_submit submit_compressed_extents <--- falls back to buffered writeout cow_file_range extent_clear_unlock_delalloc __process_pages_contig put_page(locked_pages) async_cow_submit The end result is that chunkA is completed and cleaned up before chunkB even starts processing. This means we can free locked_page() and reuse it elsewhere. If we get really lucky, it'll have the same page->index in its new home as it did before. 
While we're processing chunkB, we might decide we need to fall back to uncompressed IO, and so compress_file_range() will call __set_page_dirty_nobufers() on chunkB->locked_page. Without cgroups in use, this creates as a phantom dirty page, which isn't great but isn't the end of the world. What can happen, it can go through the fixup worker and the whole COW machinery again: in submit_compressed_extents(): while (async extents) { ... cow_file_range if (!page_started ...) extent_write_locked_range else if (...) unlock_page continue; This hasn't been observed in practice but is still possible. With cgroups in use, we might crash in the accounting code because page->mapping->i_wb isn't set. BUG: unable to handle kernel NULL pointer dereference at 00000000000000d0 IP: percpu_counter_add_batch+0x11/0x70 PGD 66534e067 P4D 66534e067 PUD 66534f067 PMD 0 Oops: 0000 [#1] SMP DEBUG_PAGEALLOC CPU: 16 PID: 2172 Comm: rm Not tainted RIP: 0010:percpu_counter_add_batch+0x11/0x70 RSP: 0018:ffffc9000a97bbe0 EFLAGS: 00010286 RAX: 0000000000000005 RBX: 0000000000000090 RCX: 0000000000026115 RDX: 0000000000000030 RSI: ffffffffffffffff RDI: 0000000000000090 RBP: 0000000000000000 R08: fffffffffffffff5 R09: 0000000000000000 R10: 00000000000260c0 R11: ffff881037fc26c0 R12: ffffffffffffffff R13: ffff880fe4111548 R14: ffffc9000a97bc90 R15: 0000000000000001 FS: 00007f5503ced480(0000) GS:ffff880ff7200000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000000d0 CR3: 00000001e0459005 CR4: 0000000000360ee0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: account_page_cleaned+0x15b/0x1f0 __cancel_dirty_page+0x146/0x200 truncate_cleanup_page+0x92/0xb0 truncate_inode_pages_range+0x202/0x7d0 btrfs_evict_inode+0x92/0x5a0 evict+0xc1/0x190 do_unlinkat+0x176/0x280 do_syscall_64+0x63/0x1a0 entry_SYSCALL_64_after_hwframe+0x42/0xb7 The fix here is to make asyc_chunk->locked_page NULL everywhere but the one async_chunk struct that's allowed to do things to the locked page. Link: https://lore.kernel.org/linux-btrfs/c2419d01-5c84-3fb4-189e-4db519d08796@suse.com/ Fixes: 771ed689d2cd ("Btrfs: Optimize compressed writeback and reads") Reviewed-by: Josef Bacik Signed-off-by: Chris Mason [ update changelog from mail thread discussion ] Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 2 +- fs/btrfs/inode.c | 25 +++++++++++++++++++++---- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 0f1d917c8bb3..4e550a3e7e34 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1909,7 +1909,7 @@ static int __process_pages_contig(struct address_space *mapping, if (page_ops & PAGE_SET_PRIVATE2) SetPagePrivate2(pages[i]); - if (pages[i] == locked_page) { + if (locked_page && pages[i] == locked_page) { put_page(pages[i]); pages_locked++; continue; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 42d1be32b2d9..861105e11b37 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -712,10 +712,12 @@ cleanup_and_bail_uncompressed: * to our extent and set things up for the async work queue to run * cow_file_range to do the normal delalloc dance. 
*/ - if (page_offset(async_chunk->locked_page) >= start && - page_offset(async_chunk->locked_page) <= end) + if (async_chunk->locked_page && + (page_offset(async_chunk->locked_page) >= start && + page_offset(async_chunk->locked_page)) <= end) { __set_page_dirty_nobuffers(async_chunk->locked_page); /* unlocked later on in the async handlers */ + } if (redirty) extent_range_redirty_for_io(inode, start, end); @@ -795,7 +797,7 @@ retry: async_extent->start + async_extent->ram_size - 1, WB_SYNC_ALL); - else if (ret) + else if (ret && async_chunk->locked_page) unlock_page(async_chunk->locked_page); kfree(async_extent); cond_resched(); @@ -1264,10 +1266,25 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, async_chunk[i].inode = inode; async_chunk[i].start = start; async_chunk[i].end = cur_end; - async_chunk[i].locked_page = locked_page; async_chunk[i].write_flags = write_flags; INIT_LIST_HEAD(&async_chunk[i].extents); + /* + * The locked_page comes all the way from writepage and its + * the original page we were actually given. As we spread + * this large delalloc region across multiple async_chunk + * structs, only the first struct needs a pointer to locked_page + * + * This way we don't need racey decisions about who is supposed + * to unlock it. + */ + if (locked_page) { + async_chunk[i].locked_page = locked_page; + locked_page = NULL; + } else { + async_chunk[i].locked_page = NULL; + } + btrfs_init_work(&async_chunk[i].work, async_cow_start, async_cow_submit, async_cow_free); -- cgit v1.2.3-59-g8ed1b From ec39f7696ccfac85b3eea41eba7d6f747ee4ce8d Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 10 Jul 2019 12:28:17 -0700 Subject: Btrfs: use REQ_CGROUP_PUNT for worker thread submitted bios Async CRCs and compression submit IO through helper threads, which means they have IO priority inversions when cgroup IO controllers are in use. This flags all of the writes submitted by btrfs helper threads as REQ_CGROUP_PUNT. submit_bio() will punt these to dedicated per-blkcg work items to avoid the priority inversion. For the compression code, we take a reference on the wbc's blkg css and pass it down to the async workers. For the async CRCs, the bio already has the correct css, we just need to tell the block layer to use REQ_CGROUP_PUNT. 
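On the write side the association boils down to a couple of lines on the compressed bio, condensed here from the diff below; blkcg_css is the css taken from the writeback control via wbc_blkcg_css():

    if (blkcg_css) {
            bio->bi_opf |= REQ_CGROUP_PUNT;
            bio_associate_blkg_from_css(bio, blkcg_css);
    }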
Reviewed-by: Josef Bacik Signed-off-by: Chris Mason Modified-and-reviewed-by: Tejun Heo Signed-off-by: David Sterba --- fs/btrfs/compression.c | 8 +++++++- fs/btrfs/compression.h | 3 ++- fs/btrfs/disk-io.c | 6 ++++++ fs/btrfs/extent_io.c | 3 +++ fs/btrfs/inode.c | 31 ++++++++++++++++++++++++++++--- 5 files changed, 46 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index ae7d93bfba97..d70c46407420 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -311,7 +311,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long compressed_len, struct page **compressed_pages, unsigned long nr_pages, - unsigned int write_flags) + unsigned int write_flags, + struct cgroup_subsys_state *blkcg_css) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct bio *bio = NULL; @@ -346,6 +347,11 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, bio->bi_opf = REQ_OP_WRITE | write_flags; bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; + + if (blkcg_css) { + bio->bi_opf |= REQ_CGROUP_PUNT; + bio_associate_blkg_from_css(bio, blkcg_css); + } refcount_set(&cb->pending_bios, 1); /* create and submit bios for the compressed pages */ diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 4cb8be9ff88b..dd392278ab3f 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -93,7 +93,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long compressed_len, struct page **compressed_pages, unsigned long nr_pages, - unsigned int write_flags); + unsigned int write_flags, + struct cgroup_subsys_state *blkcg_css); blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 18f80b31f05f..0bc6e3e9f7c3 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -791,6 +791,12 @@ static void run_one_async_done(struct btrfs_work *work) return; } + /* + * All of the bios that pass through here are from async helpers. + * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context. + * This changes nothing when cgroups aren't in use. 
+ */ + async->bio->bi_opf |= REQ_CGROUP_PUNT; ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num); if (ret) { async->bio->bi_status = ret; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 4e550a3e7e34..16b5d8e02e77 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4250,6 +4250,9 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end, .nr_to_write = nr_pages * 2, .range_start = start, .range_end = end + 1, + /* We're called from an async helper function */ + .punt_to_cgroup = 1, + .no_cgroup_owner = 1, }; while (start <= end) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 861105e11b37..7ed0fe2dd5ba 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -368,6 +368,7 @@ struct async_chunk { u64 end; unsigned int write_flags; struct list_head extents; + struct cgroup_subsys_state *blkcg_css; struct btrfs_work work; atomic_t *pending; }; @@ -880,7 +881,8 @@ retry: ins.objectid, ins.offset, async_extent->pages, async_extent->nr_pages, - async_chunk->write_flags)) { + async_chunk->write_flags, + async_chunk->blkcg_css)) { struct page *p = async_extent->pages[0]; const u64 start = async_extent->start; const u64 end = start + async_extent->ram_size - 1; @@ -1198,6 +1200,8 @@ static noinline void async_cow_free(struct btrfs_work *work) async_chunk = container_of(work, struct async_chunk, work); if (async_chunk->inode) btrfs_add_delayed_iput(async_chunk->inode); + if (async_chunk->blkcg_css) + css_put(async_chunk->blkcg_css); /* * Since the pointer to 'pending' is at the beginning of the array of * async_chunk's, freeing it ensures the whole array has been freed. @@ -1206,12 +1210,15 @@ static noinline void async_cow_free(struct btrfs_work *work) kvfree(async_chunk->pending); } -static int cow_file_range_async(struct inode *inode, struct page *locked_page, +static int cow_file_range_async(struct inode *inode, + struct writeback_control *wbc, + struct page *locked_page, u64 start, u64 end, int *page_started, unsigned long *nr_written, unsigned int write_flags) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc); struct async_cow *ctx; struct async_chunk *async_chunk; unsigned long nr_pages; @@ -1279,12 +1286,30 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, * to unlock it. */ if (locked_page) { + /* + * Depending on the compressibility, the pages might or + * might not go through async. We want all of them to + * be accounted against wbc once. Let's do it here + * before the paths diverge. wbc accounting is used + * only for foreign writeback detection and doesn't + * need full accuracy. Just account the whole thing + * against the first page. 
+ */ + wbc_account_cgroup_owner(wbc, locked_page, + cur_end - start); async_chunk[i].locked_page = locked_page; locked_page = NULL; } else { async_chunk[i].locked_page = NULL; } + if (blkcg_css != blkcg_root_css) { + css_get(blkcg_css); + async_chunk[i].blkcg_css = blkcg_css; + } else { + async_chunk[i].blkcg_css = NULL; + } + btrfs_init_work(&async_chunk[i].work, async_cow_start, async_cow_submit, async_cow_free); @@ -1727,7 +1752,7 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, } else { set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &BTRFS_I(inode)->runtime_flags); - ret = cow_file_range_async(inode, locked_page, start, end, + ret = cow_file_range_async(inode, wbc, locked_page, start, end, page_started, nr_written, write_flags); } -- cgit v1.2.3-59-g8ed1b From dbb70becde5b28940366ee419e0fdd3e09af44fb Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 10 Jul 2019 12:28:18 -0700 Subject: Btrfs: extent_write_locked_range() should attach inode->i_wb extent_write_locked_range() is used when we're falling back to buffered IO from inside of compression. It allocates its own wbc and should associate it with the inode's i_wb to make sure the IO goes down from the correct cgroup. Reviewed-by: Josef Bacik Signed-off-by: Chris Mason Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 16b5d8e02e77..e88625beb2c0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4255,6 +4255,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end, .no_cgroup_owner = 1, }; + wbc_attach_fdatawrite_inode(&wbc_writepages, inode); while (start <= end) { page = find_get_page(mapping, start >> PAGE_SHIFT); if (clear_page_dirty_for_io(page)) @@ -4269,11 +4270,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end, } ASSERT(ret <= 0); - if (ret < 0) { + if (ret == 0) + ret = flush_write_bio(&epd); + else end_write_bio(&epd, ret); - return ret; - } - ret = flush_write_bio(&epd); + + wbc_detach_inode(&wbc_writepages); return ret; } -- cgit v1.2.3-59-g8ed1b From 80d7fd1e09822daf8d94d46c6f9a17e7e34cf0b4 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 4 Oct 2019 17:31:32 +0800 Subject: btrfs: tree-checker: Refactor prev_key check for ino into a function Refactor the check for prev_key->objectid of the following key types into one function, check_prev_ino(): - EXTENT_DATA - INODE_REF - DIR_INDEX - DIR_ITEM - XATTR_ITEM Also add the check of prev_key for INODE_REF. Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 113 ++++++++++++++++++++++++++++++------------------ 1 file changed, 72 insertions(+), 41 deletions(-) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index fa4848a87430..8b6d4ccd010a 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -125,6 +125,74 @@ static u64 file_extent_end(struct extent_buffer *leaf, return end; } +/* + * Customized report for dir_item, the only new important information is + * key->objectid, which represents inode number + */ +__printf(3, 4) +__cold +static void dir_item_err(const struct extent_buffer *eb, int slot, + const char *fmt, ...) 
+{ + const struct btrfs_fs_info *fs_info = eb->fs_info; + struct btrfs_key key; + struct va_format vaf; + va_list args; + + btrfs_item_key_to_cpu(eb, &key, slot); + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + btrfs_crit(fs_info, + "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV", + btrfs_header_level(eb) == 0 ? "leaf" : "node", + btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, + key.objectid, &vaf); + va_end(args); +} + +/* + * This functions checks prev_key->objectid, to ensure current key and prev_key + * share the same objectid as inode number. + * + * This is to detect missing INODE_ITEM in subvolume trees. + * + * Return true if everything is OK or we don't need to check. + * Return false if anything is wrong. + */ +static bool check_prev_ino(struct extent_buffer *leaf, + struct btrfs_key *key, int slot, + struct btrfs_key *prev_key) +{ + /* No prev key, skip check */ + if (slot == 0) + return true; + + /* Only these key->types needs to be checked */ + ASSERT(key->type == BTRFS_XATTR_ITEM_KEY || + key->type == BTRFS_INODE_REF_KEY || + key->type == BTRFS_DIR_INDEX_KEY || + key->type == BTRFS_DIR_ITEM_KEY || + key->type == BTRFS_EXTENT_DATA_KEY); + + /* + * Only subvolume trees along with their reloc trees need this check. + * Things like log tree doesn't follow this ino requirement. + */ + if (!is_fstree(btrfs_header_owner(leaf))) + return true; + + if (key->objectid == prev_key->objectid) + return true; + + /* Error found */ + dir_item_err(leaf, slot, + "invalid previous key objectid, have %llu expect %llu", + prev_key->objectid, key->objectid); + return false; +} static int check_extent_data_item(struct extent_buffer *leaf, struct btrfs_key *key, int slot, struct btrfs_key *prev_key) @@ -148,13 +216,8 @@ static int check_extent_data_item(struct extent_buffer *leaf, * But if objectids mismatch, it means we have a missing * INODE_ITEM. */ - if (slot > 0 && is_fstree(btrfs_header_owner(leaf)) && - prev_key->objectid != key->objectid) { - file_extent_err(leaf, slot, - "invalid previous key objectid, have %llu expect %llu", - prev_key->objectid, key->objectid); + if (!check_prev_ino(leaf, key, slot, prev_key)) return -EUCLEAN; - } fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); @@ -285,34 +348,6 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key, return 0; } -/* - * Customized reported for dir_item, only important new info is key->objectid, - * which represents inode number - */ -__printf(3, 4) -__cold -static void dir_item_err(const struct extent_buffer *eb, int slot, - const char *fmt, ...) -{ - const struct btrfs_fs_info *fs_info = eb->fs_info; - struct btrfs_key key; - struct va_format vaf; - va_list args; - - btrfs_item_key_to_cpu(eb, &key, slot); - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - btrfs_crit(fs_info, - "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV", - btrfs_header_level(eb) == 0 ? 
"leaf" : "node", - btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, - key.objectid, &vaf); - va_end(args); -} - static int check_dir_item(struct extent_buffer *leaf, struct btrfs_key *key, struct btrfs_key *prev_key, int slot) @@ -322,14 +357,8 @@ static int check_dir_item(struct extent_buffer *leaf, u32 item_size = btrfs_item_size_nr(leaf, slot); u32 cur = 0; - /* Same check as in check_extent_data_item() */ - if (slot > 0 && is_fstree(btrfs_header_owner(leaf)) && - prev_key->objectid != key->objectid) { - dir_item_err(leaf, slot, - "invalid previous key objectid, have %llu expect %llu", - prev_key->objectid, key->objectid); + if (!check_prev_ino(leaf, key, slot, prev_key)) return -EUCLEAN; - } di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); while (cur < item_size) { u32 name_len; @@ -1258,6 +1287,8 @@ static int check_inode_ref(struct extent_buffer *leaf, unsigned long ptr; unsigned long end; + if (!check_prev_ino(leaf, key, slot, prev_key)) + return -EUCLEAN; /* namelen can't be 0, so item_size == sizeof() is also invalid */ if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) { inode_ref_err(fs_info, leaf, slot, -- cgit v1.2.3-59-g8ed1b From c06631b0d8a4236fb45667de60880b037863de56 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 4 Oct 2019 17:31:33 +0800 Subject: btrfs: Enhance error output for write time tree checker Unlike read time tree checker errors, write time error can't be inspected by "btrfs inspect dump-tree", so we need extra information to determine what's going wrong. The patch will add the following output for write time tree checker error: - The content of the offending tree block To help determining if it's a false alert. - Kernel WARN_ON() for debug build This is helpful for us to detect unexpected write time tree checker error, especially fstests could catch the dmesg. Since the WARN_ON() is only triggered for write time tree checker, test cases utilizing dm-error won't trigger this WARN_ON(), thus no extra noise. Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0bc6e3e9f7c3..f54a4782d18c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -545,9 +545,11 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page) ret = btrfs_check_leaf_full(eb); if (ret < 0) { + btrfs_print_tree(eb, 0); btrfs_err(fs_info, "block=%llu write time tree block corruption detected", eb->start); + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); return ret; } write_extent_buffer(eb, result, 0, csum_size); -- cgit v1.2.3-59-g8ed1b From a9143bd31c6ab52f6a320a1a4254f2a2934610b1 Mon Sep 17 00:00:00 2001 From: Marcos Paulo de Souza Date: Mon, 7 Oct 2019 21:50:38 -0300 Subject: btrfs: block-group: Rework documentation of check_system_chunk function Commit 4617ea3a52cf (" Btrfs: fix necessary chunk tree space calculation when allocating a chunk") removed the is_allocation argument from check_system_chunk, since the formula for reserving the necessary space for allocation or removing a chunk would be the same. So, rework the comment by removing the mention of is_allocation argument. 
Signed-off-by: Marcos Paulo de Souza Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 53e08f925260..384659dc7818 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -2987,9 +2987,7 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) } /* - * If @is_allocation is true, reserve space in the system space info necessary - * for allocating a chunk, otherwise if it's false, reserve space necessary for - * removing a chunk. + * Reserve space in the system space for allocating or removing a chunk */ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) { -- cgit v1.2.3-59-g8ed1b From f7bddf1e27d18fbc7d3e3056ba449cfbe4e20b0a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 3 Oct 2019 07:27:13 -0700 Subject: btrfs: Avoid getting stuck during cyclic writebacks During a cyclic writeback, extent_write_cache_pages() uses done_index to update the writeback_index after the current run is over. However, instead of current index + 1, it gets set to the current index itself. Unfortunately, this, combined with returning on EOF instead of looping back, can lead to the following pathological behavior. 1. There is a single file which has accumulated enough dirty pages to trigger balance_dirty_pages() and the writer is appending to the file with a series of short writes. 2. balance_dirty_pages kicks in, wakes up background writeback and sleeps. 3. Writeback kicks in and the cursor is on the last page of the dirty file. Writeback is started or skipped if already in progress. As it's EOF, extent_write_cache_pages() returns and the cursor is set to done_index which is pointing to the last page. 4. Writeback is done. Nothing happens till balance_dirty_pages finishes, at which point we go back to #1. This can almost completely stall out writeback of the file and keep the system over the dirty threshold for a long time, which can mess up the whole system. We encountered this issue in production with a package handling application which can reliably reproduce the issue when running under tight memory limits. Reading the comment in the error handling section, this seems to be to avoid accidentally skipping a page in case the write attempt on the page doesn't succeed. However, this concern seems bogus. On each page, the code either: * Skips and moves onto the next page. * Fails to issue and sets done_index to index + 1. * Successfully issues and continues to the next page if budget allows and it's not EOF. IOW, as long as it's not EOF and there's budget, the code never retries writing back the same page. Only when a page happens to be the last page of a particular run do we end up retrying the page, which can't possibly guarantee anything data integrity related. Besides, cyclic writes are only used for non-syncing writebacks, meaning that there's no data integrity implication to begin with. Fix it by always setting done_index past the current page being processed. Note that this problem exists in other writepages implementations too.
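To make the cursor rule easier to see in isolation, here is a minimal standalone C model of a cyclic writeback pass (simplified, invented names; not the kernel code): the cursor is always advanced past the page being processed, so a run that ends at EOF does not keep pointing at the same last page.

/* Illustrative userspace model of the cyclic writeback cursor, not kernel
 * code: the cursor must advance past the last page processed, otherwise a
 * run that ends at EOF keeps pointing at the same dirty page forever. */
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 8

static bool dirty[NR_PAGES];

/* Write back pages starting at *cursor; return number of pages written. */
static int writeback_run(int *cursor)
{
	int index, written = 0;
	int done_index = *cursor;

	for (index = *cursor; index < NR_PAGES; index++) {
		done_index = index + 1;   /* advance past the page being processed */
		if (dirty[index]) {
			dirty[index] = false; /* "issue" the write */
			written++;
		}
	}
	*cursor = done_index;             /* next cyclic run resumes after this point */
	return written;
}

int main(void)
{
	int cursor = NR_PAGES - 1;        /* cursor starts on the last dirty page */

	dirty[NR_PAGES - 1] = true;
	printf("run 1 wrote %d page(s), cursor now %d\n",
	       writeback_run(&cursor), cursor);
	/* With done_index = index (the old behaviour) the cursor would still
	 * point at the last page and the next run would retry it needlessly. */
	return 0;
}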
CC: stable@vger.kernel.org # 4.19+ Signed-off-by: Tejun Heo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e88625beb2c0..a2f919087fa1 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4131,7 +4131,7 @@ retry: for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; - done_index = page->index; + done_index = page->index + 1; /* * At this point we hold neither the i_pages lock nor * the page lock: the page may be truncated or @@ -4166,16 +4166,6 @@ retry: ret = __extent_writepage(page, wbc, epd); if (ret < 0) { - /* - * done_index is set past this page, - * so media errors will not choke - * background writeout for the entire - * file. This has consequences for - * range_cyclic semantics (ie. it may - * not be suitable for data integrity - * writeout). - */ - done_index = page->index + 1; done = 1; break; } -- cgit v1.2.3-59-g8ed1b From 67439dadb03ad9da45bfccb4cdb6ef6b1a7f8da9 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 8 Oct 2019 13:28:47 +0200 Subject: btrfs: opencode extent_buffer_get The helper is trivial and we can understand what the atomic_inc on something named refs does. Reviewed-by: Johannes Thumshirn Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 12 ++++++------ fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent-tree.c | 2 +- fs/btrfs/extent_io.h | 5 ----- fs/btrfs/qgroup.c | 6 +++--- fs/btrfs/relocation.c | 4 ++-- fs/btrfs/tree-log.c | 2 +- 7 files changed, 14 insertions(+), 19 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 3a4d8e27e565..739757978686 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1100,7 +1100,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) parent_start = buf->start; - extent_buffer_get(cow); + atomic_inc(&cow->refs); ret = tree_mod_log_insert_root(root->node, cow, 1); BUG_ON(ret < 0); rcu_assign_pointer(root->node, cow); @@ -2011,7 +2011,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, /* update the path */ if (left) { if (btrfs_header_nritems(left) > orig_slot) { - extent_buffer_get(left); + atomic_inc(&left->refs); /* left was locked after cow */ path->nodes[level] = left; path->slots[level + 1] -= 1; @@ -2601,7 +2601,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, } else { b = root->commit_root; - extent_buffer_get(b); + atomic_inc(&b->refs); } level = btrfs_header_level(b); /* @@ -3375,7 +3375,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, free_extent_buffer(old); add_root_to_dirty_list(root); - extent_buffer_get(c); + atomic_inc(&c->refs); path->nodes[level] = c; path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; path->slots[level] = 0; @@ -4908,7 +4908,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, root_sub_used(root, leaf->len); - extent_buffer_get(leaf); + atomic_inc(&leaf->refs); btrfs_free_tree_block(trans, root, leaf, 0, 1); free_extent_buffer_stale(leaf); } @@ -4989,7 +4989,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, * for possible call to del_ptr below */ slot = path->slots[1]; - extent_buffer_get(leaf); + atomic_inc(&leaf->refs); btrfs_set_path_blocking(path); wret = push_leaf_left(trans, root, path, 1, 1, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 
f54a4782d18c..43e14a17e3f4 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -610,7 +610,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, /* the pending IO might have been the only thing that kept this buffer * in memory. Make sure we have a ref for all this other checks */ - extent_buffer_get(eb); + atomic_inc(&eb->refs); reads_done = atomic_dec_and_test(&eb->io_pages); if (!reads_done) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 49cb26fa7c63..9e5845548b76 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5436,7 +5436,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, btrfs_assert_tree_locked(parent); parent_level = btrfs_header_level(parent); - extent_buffer_get(parent); + atomic_inc(&parent->refs); path->nodes[parent_level] = parent; path->slots[parent_level] = btrfs_header_nritems(parent); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index e22045cef89b..a8551a1f56e2 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -230,11 +230,6 @@ static inline int num_extent_pages(const struct extent_buffer *eb) (eb->start >> PAGE_SHIFT); } -static inline void extent_buffer_get(struct extent_buffer *eb) -{ - atomic_inc(&eb->refs); -} - static inline int extent_buffer_uptodate(struct extent_buffer *eb) { return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index fde0973d893a..f414fd914ddd 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1811,7 +1811,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans, btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0); /* For src_path */ - extent_buffer_get(src_eb); + atomic_inc(&src_eb->refs); src_path->nodes[root_level] = src_eb; src_path->slots[root_level] = dst_path->slots[root_level]; src_path->locks[root_level] = 0; @@ -2067,7 +2067,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans, goto out; } /* For dst_path */ - extent_buffer_get(dst_eb); + atomic_inc(&dst_eb->refs); dst_path->nodes[level] = dst_eb; dst_path->slots[level] = 0; dst_path->locks[level] = 0; @@ -2126,7 +2126,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, * walk back up the tree (adjusting slot pointers as we go) * and restart the search process. 
*/ - extent_buffer_get(root_eb); /* For path */ + atomic_inc(&root_eb->refs); /* For path */ path->nodes[root_level] = root_eb; path->slots[root_level] = 0; path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index df195e2bd45f..90b80da5abe6 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2246,7 +2246,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { level = btrfs_root_level(root_item); - extent_buffer_get(reloc_root->node); + atomic_inc(&reloc_root->node->refs); path->nodes[level] = reloc_root->node; path->slots[level] = 0; } else { @@ -4688,7 +4688,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, node->new_bytenr != buf->start); drop_node_buffer(node); - extent_buffer_get(cow); + atomic_inc(&cow->refs); node->eb = cow; node->new_bytenr = cow->start; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 30a17143448d..6f757361db53 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2851,7 +2851,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, level = btrfs_header_level(log->node); orig_level = level; path->nodes[level] = log->node; - extent_buffer_get(log->node); + atomic_inc(&log->node->refs); path->slots[level] = 0; while (1) { -- cgit v1.2.3-59-g8ed1b From ce6d3eb6fd5cfede81c7a3d540919f6d92faaac0 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 8 Oct 2019 16:26:16 +0300 Subject: btrfs: User assert to document transaction requirement Using an ASSERT in btrfs_pin_extent allows to more stringently observe whether the function is called under a transaction or not. Reviewed-by: Johannes Thumshirn Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9e5845548b76..569fd2adecaa 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2590,14 +2590,13 @@ static int pin_down_extent(struct btrfs_block_group_cache *cache, return 0; } -/* - * this function must be called within transaction - */ int btrfs_pin_extent(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, int reserved) { struct btrfs_block_group_cache *cache; + ASSERT(fs_info->running_transaction); + cache = btrfs_lookup_block_group(fs_info, bytenr); BUG_ON(!cache); /* Logic error */ -- cgit v1.2.3-59-g8ed1b From 8d510121bfbf87302e0594d2022c5e7d52b26f7f Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 8 Oct 2019 20:43:06 +0300 Subject: btrfs: Rename btrfs_join_transaction_nolock This function is used only during the final phase of freespace cache writeout. This is necessary since using the plain btrfs_join_transaction api is deadlock prone. 
The deadlock looks like: T1: btrfs_commit_transaction commit_cowonly_roots btrfs_write_dirty_block_groups btrfs_wait_cache_io __btrfs_wait_cache_io btrfs_wait_ordered_range <-- Triggers ordered IO for freespace inode and blocks transaction commit until freespace cache writeout T2: <-- after T1 has triggered the writeout finish_ordered_fn btrfs_finish_ordered_io btrfs_join_transaction <--- this would block waiting for current transaction to commit, but since trans commit is waiting for this writeout to finish The special purpose functions prevents it by simply skipping the "wait for writeout" since it's guaranteed the transaction won't proceed until we are done. Reviewed-by: Qu Wenruo Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 12 ++++++------ fs/btrfs/transaction.c | 2 +- fs/btrfs/transaction.h | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7ed0fe2dd5ba..3315a6eb09a3 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3066,7 +3066,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) int compress_type = 0; int ret = 0; u64 logical_len = ordered_extent->len; - bool nolock; + bool freespace_inode; bool truncated = false; bool range_locked = false; bool clear_new_delalloc_bytes = false; @@ -3077,7 +3077,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags)) clear_new_delalloc_bytes = true; - nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); + freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode)); if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { ret = -EIO; @@ -3108,8 +3108,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset, ordered_extent->len); btrfs_ordered_update_i_size(inode, 0, ordered_extent); - if (nolock) - trans = btrfs_join_transaction_nolock(root); + if (freespace_inode) + trans = btrfs_join_transaction_spacecache(root); else trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { @@ -3143,8 +3143,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) EXTENT_DEFRAG, 0, 0, &cached_state); } - if (nolock) - trans = btrfs_join_transaction_nolock(root); + if (freespace_inode) + trans = btrfs_join_transaction_spacecache(root); else trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 54b8718054ce..6f133906c862 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -729,7 +729,7 @@ struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) true); } -struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root) +struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_JOIN_NOLOCK, BTRFS_RESERVE_NO_FLUSH, true); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 2ac89fb0d709..49f7196368f5 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -183,7 +183,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( unsigned int num_items, int min_factor); struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); -struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); +struct btrfs_trans_handle 
*btrfs_join_transaction_spacecache(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_attach_transaction_barrier( -- cgit v1.2.3-59-g8ed1b From b2cd29596469f16e2a56bcd59d6cbf7f7652131f Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Thu, 10 Oct 2019 15:59:56 +0800 Subject: btrfs: props: remove unnecessary hash_init() DEFINE_HASHTABLE itself has already included initialization code, we don't have to call hash_init() again, so remove it. Signed-off-by: Chengguang Xu Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/props.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c index aac596300c89..deb59e7cfcac 100644 --- a/fs/btrfs/props.c +++ b/fs/btrfs/props.c @@ -437,8 +437,6 @@ void __init btrfs_props_init(void) { int i; - hash_init(prop_handlers_ht); - for (i = 0; i < ARRAY_SIZE(prop_handlers); i++) { struct prop_handler *p = &prop_handlers[i]; u64 h = btrfs_name_hash(p->xattr_name, strlen(p->xattr_name)); -- cgit v1.2.3-59-g8ed1b From b9b1a53e180e2ab4cea42c95bf344e4bf7f524e7 Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Thu, 10 Oct 2019 15:59:58 +0800 Subject: btrfs: use enum for extent type defines Use enum to replace macro definitions of extent types. Signed-off-by: Chengguang Xu Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 4 ++-- include/uapi/linux/btrfs_tree.h | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 8b6d4ccd010a..9699c91c958e 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -221,11 +221,11 @@ static int check_extent_data_item(struct extent_buffer *leaf, fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) { + if (btrfs_file_extent_type(leaf, fi) >= BTRFS_NR_FILE_EXTENT_TYPES) { file_extent_err(leaf, slot, "invalid type for file extent, have %u expect range [0, %u]", btrfs_file_extent_type(leaf, fi), - BTRFS_FILE_EXTENT_TYPES); + BTRFS_NR_FILE_EXTENT_TYPES - 1); return -EUCLEAN; } diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index b65c7ee75bc7..44136de2f5d7 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -737,10 +737,12 @@ struct btrfs_balance_item { __le64 unused[4]; } __attribute__ ((__packed__)); -#define BTRFS_FILE_EXTENT_INLINE 0 -#define BTRFS_FILE_EXTENT_REG 1 -#define BTRFS_FILE_EXTENT_PREALLOC 2 -#define BTRFS_FILE_EXTENT_TYPES 2 +enum { + BTRFS_FILE_EXTENT_INLINE = 0, + BTRFS_FILE_EXTENT_REG = 1, + BTRFS_FILE_EXTENT_PREALLOC = 2, + BTRFS_NR_FILE_EXTENT_TYPES = 3, +}; struct btrfs_file_extent_item { /* -- cgit v1.2.3-59-g8ed1b From ce96b7ffd11e261ef2ecd6817b7572a77750170d Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Thu, 10 Oct 2019 15:59:57 +0800 Subject: btrfs: use better definition of number of compression type The compression type upper limit constant is the same as the last value and this is confusing. In order to keep coding style consistent, use BTRFS_NR_COMPRESS_TYPES as the total number that follows the idom of 'NR' being one more than the last value. 
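The 'NR' idiom referenced above can be shown with a tiny standalone sketch (a hypothetical example_type enum, not the btrfs definitions): the NR_ constant is one past the last valid value, so validity checks become a plain '>=' or '<' comparison against it.

/* Sketch of the 'NR' enum idiom: the NR_ constant is always last value + 1,
 * so a range check rejects anything >= NR. Hypothetical names throughout. */
#include <stdbool.h>
#include <stdio.h>

enum example_type {
	EXAMPLE_NONE = 0,
	EXAMPLE_ZLIB = 1,
	EXAMPLE_LZO  = 2,
	EXAMPLE_ZSTD = 3,
	EXAMPLE_NR_TYPES = 4,	/* one past the last valid value */
};

static bool example_type_valid(unsigned int type)
{
	return type < EXAMPLE_NR_TYPES;	/* equivalently: reject type >= NR */
}

int main(void)
{
	printf("3 valid: %d\n", example_type_valid(3));	/* prints 1 */
	printf("4 valid: %d\n", example_type_valid(4));	/* prints 0 */
	return 0;
}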
Signed-off-by: Chengguang Xu Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 2 ++ fs/btrfs/compression.h | 4 ++-- fs/btrfs/ioctl.c | 2 +- fs/btrfs/tree-checker.c | 4 ++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d70c46407420..93deaf0cc2b8 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -39,6 +39,8 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type) case BTRFS_COMPRESS_ZSTD: case BTRFS_COMPRESS_NONE: return btrfs_compress_types[type]; + default: + break; } return NULL; diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index dd392278ab3f..52dce1176f89 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -105,7 +105,7 @@ enum btrfs_compression_type { BTRFS_COMPRESS_ZLIB = 1, BTRFS_COMPRESS_LZO = 2, BTRFS_COMPRESS_ZSTD = 3, - BTRFS_COMPRESS_TYPES = 3, + BTRFS_NR_COMPRESS_TYPES = 4, }; struct workspace_manager { @@ -163,7 +163,7 @@ struct btrfs_compress_op { }; /* The heuristic workspaces are managed via the 0th workspace manager */ -#define BTRFS_NR_WORKSPACE_MANAGERS (BTRFS_COMPRESS_TYPES + 1) +#define BTRFS_NR_WORKSPACE_MANAGERS BTRFS_NR_COMPRESS_TYPES extern const struct btrfs_compress_op btrfs_heuristic_compress; extern const struct btrfs_compress_op btrfs_zlib_compress; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d7a1bd74bb71..5892fa9398c3 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1409,7 +1409,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, return -EINVAL; if (do_compress) { - if (range->compress_type > BTRFS_COMPRESS_TYPES) + if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES) return -EINVAL; if (range->compress_type) compress_type = range->compress_type; diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 9699c91c958e..f4751615a189 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -233,11 +233,11 @@ static int check_extent_data_item(struct extent_buffer *leaf, * Support for new compression/encryption must introduce incompat flag, * and must be caught in open_ctree(). */ - if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) { + if (btrfs_file_extent_compression(leaf, fi) >= BTRFS_NR_COMPRESS_TYPES) { file_extent_err(leaf, slot, "invalid compression for file extent, have %u expect range [0, %u]", btrfs_file_extent_compression(leaf, fi), - BTRFS_COMPRESS_TYPES); + BTRFS_NR_COMPRESS_TYPES - 1); return -EUCLEAN; } if (btrfs_file_extent_encryption(leaf, fi)) { -- cgit v1.2.3-59-g8ed1b From 4273eaff9b8d5e141113a5bdf9628c02acf3afe5 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Thu, 10 Oct 2019 10:39:25 +0800 Subject: btrfs: use bool argument in free_root_pointers() We don't need an int argument in free_root_pointers(), a bool will do. Also rename the argument as it confused two people.
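As a small illustration of the argument change (a hypothetical teardown helper, not the btrfs function), a descriptively named bool keeps call sites self-documenting where a bare int flag would not be:

/* Toy sketch of the int-to-bool parameter change; invented names only. */
#include <stdbool.h>
#include <stdio.h>

static void free_roots(bool free_chunk_root)
{
	printf("freeing regular roots\n");
	if (free_chunk_root)
		printf("freeing chunk root\n");
}

int main(void)
{
	free_roots(true);	/* reads better at the call site than free_roots(1) */
	free_roots(false);
	return 0;
}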
Reviewed-by: Qu Wenruo Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 43e14a17e3f4..5d32deb42993 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2022,7 +2022,7 @@ static void free_root_extent_buffers(struct btrfs_root *root) } /* helper to cleanup tree roots */ -static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) +static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root) { free_root_extent_buffers(info->tree_root); @@ -2031,7 +2031,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) free_root_extent_buffers(info->csum_root); free_root_extent_buffers(info->quota_root); free_root_extent_buffers(info->uuid_root); - if (chunk_root) + if (free_chunk_root) free_root_extent_buffers(info->chunk_root); free_root_extent_buffers(info->free_space_root); } @@ -3320,7 +3320,7 @@ fail_block_groups: btrfs_put_block_group_cache(fs_info); fail_tree_roots: - free_root_pointers(fs_info, 1); + free_root_pointers(fs_info, true); invalidate_inode_pages2(fs_info->btree_inode->i_mapping); fail_sb_buffer: @@ -3352,7 +3352,7 @@ recovery_tree_root: if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) goto fail_tree_roots; - free_root_pointers(fs_info, 0); + free_root_pointers(fs_info, false); /* don't use the log in recovery mode, it won't be valid */ btrfs_set_super_log_root(disk_super, 0); @@ -4046,7 +4046,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) btrfs_free_block_groups(fs_info); clear_bit(BTRFS_FS_OPEN, &fs_info->flags); - free_root_pointers(fs_info, 1); + free_root_pointers(fs_info, true); iput(fs_info->btree_inode); -- cgit v1.2.3-59-g8ed1b From b929c1d8313c80f75979a3d7906a71a640eea171 Mon Sep 17 00:00:00 2001 From: Marcos Paulo de Souza Date: Thu, 10 Oct 2019 21:23:11 -0300 Subject: btrfs: ioctl: Try to use btrfs_fs_info instead of *file Some functions are doing some unnecessary indirection to reach the btrfs_fs_info struct. Change these functions to receive a btrfs_fs_info struct instead of a *file. 
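A minimal standalone sketch of this kind of refactor (hypothetical structs, not the kernel ones): helpers that only need the filesystem context take it directly instead of rederiving it from a file handle on every call.

/* Illustrative model of dropping the file -> inode -> fs_info indirection.
 * All names are invented for the example. */
#include <stdio.h>

struct fs_info { const char *label; };
struct inode   { struct fs_info *fs; };
struct file    { struct inode *inode; };

/* Before: every helper repeats file -> inode -> fs_info. */
static void print_label_from_file(struct file *file)
{
	printf("%s\n", file->inode->fs->label);
}

/* After: the dispatcher resolves fs_info once and passes it down. */
static void print_label(struct fs_info *fs)
{
	printf("%s\n", fs->label);
}

int main(void)
{
	struct fs_info fs = { .label = "demo" };
	struct inode inode = { .fs = &fs };
	struct file file = { .inode = &inode };

	print_label_from_file(&file);
	print_label(file.inode->fs);	/* caller already had the context */
	return 0;
}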
Reviewed-by: Anand Jain Reviewed-by: Nikolay Borisov Signed-off-by: Marcos Paulo de Souza Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 36 +++++++++++++++--------------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5892fa9398c3..a51f8fbadf41 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -479,10 +479,9 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg) return put_user(inode->i_generation, arg); } -static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) +static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_device *device; struct request_queue *q; struct fstrim_range range; @@ -4952,10 +4951,9 @@ drop_write: return ret; } -static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg) +static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ioctl_quota_rescan_args *qsa; int ret = 0; @@ -4978,11 +4976,9 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg) return ret; } -static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg) +static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -5154,10 +5150,9 @@ out: return ret; } -static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg) +static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); size_t len; int ret; char label[BTRFS_LABEL_SIZE]; @@ -5241,10 +5236,9 @@ int btrfs_ioctl_get_supported_features(void __user *arg) return 0; } -static int btrfs_ioctl_get_features(struct file *file, void __user *arg) +static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_ioctl_feature_flags features; @@ -5445,11 +5439,11 @@ long btrfs_ioctl(struct file *file, unsigned int case FS_IOC_GETVERSION: return btrfs_ioctl_getversion(file, argp); case FS_IOC_GETFSLABEL: - return btrfs_ioctl_get_fslabel(file, argp); + return btrfs_ioctl_get_fslabel(fs_info, argp); case FS_IOC_SETFSLABEL: return btrfs_ioctl_set_fslabel(file, argp); case FITRIM: - return btrfs_ioctl_fitrim(file, argp); + return btrfs_ioctl_fitrim(fs_info, argp); case BTRFS_IOC_SNAP_CREATE: return btrfs_ioctl_snap_create(file, argp, 0); case BTRFS_IOC_SNAP_CREATE_V2: @@ -5554,15 +5548,15 @@ long btrfs_ioctl(struct file *file, unsigned int case BTRFS_IOC_QUOTA_RESCAN: return btrfs_ioctl_quota_rescan(file, argp); case BTRFS_IOC_QUOTA_RESCAN_STATUS: - return btrfs_ioctl_quota_rescan_status(file, argp); + return btrfs_ioctl_quota_rescan_status(fs_info, argp); case BTRFS_IOC_QUOTA_RESCAN_WAIT: - return btrfs_ioctl_quota_rescan_wait(file, argp); + return btrfs_ioctl_quota_rescan_wait(fs_info, argp); case BTRFS_IOC_DEV_REPLACE: return btrfs_ioctl_dev_replace(fs_info, argp); 
case BTRFS_IOC_GET_SUPPORTED_FEATURES: return btrfs_ioctl_get_supported_features(argp); case BTRFS_IOC_GET_FEATURES: - return btrfs_ioctl_get_features(file, argp); + return btrfs_ioctl_get_features(fs_info, argp); case BTRFS_IOC_SET_FEATURES: return btrfs_ioctl_set_features(file, argp); case FS_IOC_FSGETXATTR: -- cgit v1.2.3-59-g8ed1b From a0e248bb502d5165b3314ac3819e888fdcdf7d9f Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 11 Oct 2019 16:41:20 +0100 Subject: Btrfs: fix negative subv_writers counter and data space leak after buffered write When doing a buffered write it's possible to leave the subv_writers counter of the root, used for synchronization between buffered nocow writers and snapshotting. This happens in an exceptional case like the following: 1) We fail to allocate data space for the write, since there's not enough available data space nor enough unallocated space for allocating a new data block group; 2) Because of that failure, we try to go to NOCOW mode, which succeeds and therefore we set the local variable 'only_release_metadata' to true and set the root's sub_writers counter to 1 through the call to btrfs_start_write_no_snapshotting() made by check_can_nocow(); 3) The call to btrfs_copy_from_user() returns zero, which is very unlikely to happen but not impossible; 4) No pages are copied because btrfs_copy_from_user() returned zero; 5) We call btrfs_end_write_no_snapshotting() which decrements the root's subv_writers counter to 0; 6) We don't set 'only_release_metadata' back to 'false' because we do it only if 'copied', the value returned by btrfs_copy_from_user(), is greater than zero; 7) On the next iteration of the while loop, which processes the same page range, we are now able to allocate data space for the write (we got enough data space released in the meanwhile); 8) After this if we fail at btrfs_delalloc_reserve_metadata(), because now there isn't enough free metadata space, or in some other place further below (prepare_pages(), lock_and_cleanup_extent_if_need(), btrfs_dirty_pages()), we break out of the while loop with 'only_release_metadata' having a value of 'true'; 9) Because 'only_release_metadata' is 'true' we end up decrementing the root's subv_writers counter to -1 (through a call to btrfs_end_write_no_snapshotting()), and we also end up not releasing the data space previously reserved through btrfs_check_data_free_space(). As a consequence the mechanism for synchronizing NOCOW buffered writes with snapshotting gets broken. Fix this by always setting 'only_release_metadata' to false at the start of each iteration. 
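The bug class can be modelled with a few lines of standalone C (a toy loop, not the btrfs write path): a per-iteration flag that is cleared only on some paths can leak a stale value into the error handling of a later iteration, while resetting it at the top of every iteration makes the state obviously correct.

/* Toy model of a stale per-iteration flag leaking into a later iteration.
 * Invented names; the real fix is in btrfs_buffered_write(). */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool only_release_metadata;
	int iter;

	for (iter = 0; iter < 3; iter++) {
		only_release_metadata = false;	/* reset unconditionally, as in the fix */

		if (iter == 0)
			only_release_metadata = true;	/* iteration 0 takes the nocow path */

		/* If an error broke out of the loop on iteration 1, without the
		 * reset above it would still see the value set by iteration 0. */
		printf("iteration %d: only_release_metadata=%d\n",
		       iter, only_release_metadata);
	}
	return 0;
}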
Fixes: 8257b2dc3c1a ("Btrfs: introduce btrfs_{start, end}_nocow_write() for each subvolume") Fixes: 7ee9e4405f26 ("Btrfs: check if we can nocow if we don't have data space") CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Josef Bacik Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f9434fa3e387..91c98a6d1408 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1636,6 +1636,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, break; } + only_release_metadata = false; sector_offset = pos & (fs_info->sectorsize - 1); reserve_bytes = round_up(write_bytes + sector_offset, fs_info->sectorsize); @@ -1791,7 +1792,6 @@ again: set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, EXTENT_NORESERVE, NULL, NULL, GFP_NOFS); - only_release_metadata = false; } btrfs_drop_pages(pages, num_pages); -- cgit v1.2.3-59-g8ed1b From 536870071dbc4278264f59c9a2f5f447e584d139 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 9 Oct 2019 17:43:59 +0100 Subject: Btrfs: fix metadata space leak on fixup worker failure to set range as delalloc In the fixup worker, if we fail to mark the range as delalloc in the io tree, we must release the previously reserved metadata, as well as update the outstanding extents counter for the inode, otherwise we leak metadata space. In pratice we can't return an error from btrfs_set_extent_delalloc(), which is just a wrapper around __set_extent_bit(), as for most errors __set_extent_bit() does a BUG_ON() (or panics which hits a BUG_ON() as well) and returning an -EEXIST error doesn't happen in this case since the exclusive bits parameter always has a value of 0 through this code path. Nevertheless, just fix the error handling in the fixup worker, in case one day __set_extent_bit() can return an error to this code path. Fixes: f3038ee3a3f101 ("btrfs: Handle btrfs_set_extent_delalloc failure in fixup worker") CC: stable@vger.kernel.org # 4.19+ Reviewed-by: Nikolay Borisov Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3315a6eb09a3..b943c9770533 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2254,12 +2254,16 @@ again: mapping_set_error(page->mapping, ret); end_extent_writepage(page, ret, page_start, page_end); ClearPageChecked(page); - goto out; + goto out_reserved; } ClearPageChecked(page); set_page_dirty(page); +out_reserved: btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); + if (ret) + btrfs_delalloc_release_space(inode, data_reserved, page_start, + PAGE_SIZE, true); out: unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, &cached_state); -- cgit v1.2.3-59-g8ed1b From 3797136b626ad4b6582223660c041efdea8f26b2 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 24 Sep 2019 16:50:43 -0400 Subject: btrfs: check page->mapping when loading free space cache While testing 5.2 we ran into the following panic [52238.017028] BUG: kernel NULL pointer dereference, address: 0000000000000001 [52238.105608] RIP: 0010:drop_buffers+0x3d/0x150 [52238.304051] Call Trace: [52238.308958] try_to_free_buffers+0x15b/0x1b0 [52238.317503] shrink_page_list+0x1164/0x1780 [52238.325877] shrink_inactive_list+0x18f/0x3b0 [52238.334596] shrink_node_memcg+0x23e/0x7d0 [52238.342790] ? 
do_shrink_slab+0x4f/0x290 [52238.350648] shrink_node+0xce/0x4a0 [52238.357628] balance_pgdat+0x2c7/0x510 [52238.365135] kswapd+0x216/0x3e0 [52238.371425] ? wait_woken+0x80/0x80 [52238.378412] ? balance_pgdat+0x510/0x510 [52238.386265] kthread+0x111/0x130 [52238.392727] ? kthread_create_on_node+0x60/0x60 [52238.401782] ret_from_fork+0x1f/0x30 The page we were trying to drop had a page->private, but had no page->mapping and so called drop_buffers, assuming that we had a buffer_head on the page, and then panic'ed trying to deref 1, which is our page->private for data pages. This is happening because we're truncating the free space cache while we're trying to load the free space cache. This isn't supposed to happen, and I'll fix that in a followup patch. However we still shouldn't allow those sort of mistakes to result in messing with pages that do not belong to us. So add the page->mapping check to verify that we still own this page after dropping and re-acquiring the page lock. This page being unlocked as: btrfs_readpage extent_read_full_page __extent_read_full_page __do_readpage if (!nr) unlock_page <-- nr can be 0 only if submit_extent_page returns an error CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Filipe Manana Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik [ add callchain ] Signed-off-by: David Sterba --- fs/btrfs/free-space-cache.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 85cd874e7b48..2a831eb8a66c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -385,6 +385,12 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode if (uptodate && !PageUptodate(page)) { btrfs_readpage(NULL, page); lock_page(page); + if (page->mapping != inode->i_mapping) { + btrfs_err(BTRFS_I(inode)->root->fs_info, + "free space cache page truncated"); + io_ctl_drop_pages(io_ctl); + return -EIO; + } if (!PageUptodate(page)) { btrfs_err(BTRFS_I(inode)->root->fs_info, "error reading free space cache"); -- cgit v1.2.3-59-g8ed1b From a60adce85f4bb5c1ef8ffcebadd702cafa2f3696 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 24 Sep 2019 16:50:44 -0400 Subject: btrfs: use btrfs_block_group_cache_done in update_block_group When free'ing extents in a block group we check to see if the block group is not cached, and then cache it if we need to. However we'll just carry on as long as we're loading the cache. This is problematic because we are dirtying the block group here. If we are fast enough we could do a transaction commit and clear the free space cache while we're still loading the space cache in another thread. This truncates the free space inode, which will keep it from loading the space cache. Fix this by using the btrfs_block_group_cache_done helper so that we try to load the space cache unconditionally here, which will result in the caller waiting for the fast caching to complete and keep us from truncating the free space inode. 
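The difference between the two checks can be sketched with a toy state machine (hypothetical cache states, not the kernel enum): testing only for "caching not started" misses the window where caching is already in progress, so the stricter "not done" test is needed.

/* Toy three-state cache to contrast the old and new checks; invented names. */
#include <stdbool.h>
#include <stdio.h>

enum cache_state { CACHE_NO, CACHE_STARTED, CACHE_FINISHED };

static bool cache_done(enum cache_state s)
{
	return s == CACHE_FINISHED;
}

static void maybe_cache(enum cache_state s)
{
	/* Old check: only reacts to CACHE_NO and carries on while loading. */
	if (s == CACHE_NO)
		printf("old check: would start caching\n");

	/* New check: anything short of finished caches and waits. */
	if (!cache_done(s))
		printf("new check: would cache/wait (state %d)\n", s);
}

int main(void)
{
	maybe_cache(CACHE_STARTED);	/* the old check does nothing here */
	return 0;
}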
CC: stable@vger.kernel.org # 4.4+ Signed-off-by: Josef Bacik Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 384659dc7818..540a7a63601e 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -2661,7 +2661,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, * is because we need the unpinning stage to actually add the * space back to the block group, otherwise we will leak space. */ - if (!alloc && cache->cached == BTRFS_CACHE_NO) + if (!alloc && !btrfs_block_group_cache_done(cache)) btrfs_cache_block_group(cache, 1); byte_in_group = bytenr - cache->key.objectid; -- cgit v1.2.3-59-g8ed1b From c4bf665a3197554a696121d20f3bf11d084a6961 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 1 Oct 2019 22:38:34 +0200 Subject: btrfs: export compression and decompression callbacks Export compress_pages, decompress_bio and decompress callbacks for all compression algos. The indirect calls will be replaced by a switch. Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 24 ++++++++++++++++++++++++ fs/btrfs/lzo.c | 19 +++++++------------ fs/btrfs/zlib.c | 19 +++++++------------ fs/btrfs/zstd.c | 19 +++++++------------ 4 files changed, 45 insertions(+), 36 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 93deaf0cc2b8..8611a8b3321a 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -29,6 +29,30 @@ #include "extent_io.h" #include "extent_map.h" +int zlib_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out); +int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb); +int zlib_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen); + +int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out); +int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb); +int lzo_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen); + +int zstd_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out); +int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb); +int zstd_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen); + static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; const char* btrfs_compress_type2str(enum btrfs_compression_type type) diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index acad4174f68d..04a6815ea9cb 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -131,13 +131,9 @@ static inline size_t read_compress_length(const char *buf) return le32_to_cpu(dlen); } -static int lzo_compress_pages(struct list_head *ws, - struct address_space *mapping, - u64 start, - struct page **pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out) +int lzo_compress_pages(struct list_head *ws, 
struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; @@ -303,7 +299,7 @@ out: return ret; } -static int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) +int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0, ret2; @@ -444,10 +440,9 @@ done: return ret; } -static int lzo_decompress(struct list_head *ws, unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen) +int lzo_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen) { struct workspace *workspace = list_entry(ws, struct workspace, list); size_t in_len; diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index df1aace5df50..4091f94ba378 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -88,13 +88,9 @@ fail: return ERR_PTR(-ENOMEM); } -static int zlib_compress_pages(struct list_head *ws, - struct address_space *mapping, - u64 start, - struct page **pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out) +int zlib_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret; @@ -228,7 +224,7 @@ out: return ret; } -static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb) +int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0, ret2; @@ -319,10 +315,9 @@ done: return ret; } -static int zlib_decompress(struct list_head *ws, unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen) +int zlib_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 764d47b107e5..b3728220c329 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -367,13 +367,9 @@ fail: return ERR_PTR(-ENOMEM); } -static int zstd_compress_pages(struct list_head *ws, - struct address_space *mapping, - u64 start, - struct page **pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out) +int zstd_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); ZSTD_CStream *stream; @@ -548,7 +544,7 @@ out: return ret; } -static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb) +int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb) { struct workspace *workspace = list_entry(ws, struct workspace, list); struct page **pages_in = cb->compressed_pages; @@ -626,10 +622,9 @@ done: return ret; } -static int zstd_decompress(struct list_head *ws, unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen) +int zstd_decompress(struct 
list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen) { struct workspace *workspace = list_entry(ws, struct workspace, list); ZSTD_DStream *stream; -- cgit v1.2.3-59-g8ed1b From 1e4eb7465438e47cc3718a02d4326dd034f3c0da Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 2 Oct 2019 00:06:15 +0200 Subject: btrfs: switch compression callbacks to direct calls The indirect calls bring some overhead due to spectre vulnerability mitigations. The number of cases is small and below the threshold (10-20) where indirect call would be better. Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 77 ++++++++++++++++++++++++++++++++++++++++++++------ fs/btrfs/compression.h | 17 ----------- fs/btrfs/lzo.c | 3 -- fs/btrfs/zlib.c | 3 -- fs/btrfs/zstd.c | 3 -- 5 files changed, 69 insertions(+), 34 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 8611a8b3321a..87bac8f73a99 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -86,6 +86,70 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len) return false; } +static int compression_compress_pages(int type, struct list_head *ws, + struct address_space *mapping, u64 start, struct page **pages, + unsigned long *out_pages, unsigned long *total_in, + unsigned long *total_out) +{ + switch (type) { + case BTRFS_COMPRESS_ZLIB: + return zlib_compress_pages(ws, mapping, start, pages, + out_pages, total_in, total_out); + case BTRFS_COMPRESS_LZO: + return lzo_compress_pages(ws, mapping, start, pages, + out_pages, total_in, total_out); + case BTRFS_COMPRESS_ZSTD: + return zstd_compress_pages(ws, mapping, start, pages, + out_pages, total_in, total_out); + case BTRFS_COMPRESS_NONE: + default: + /* + * This can't happen, the type is validated several times + * before we get here. As a sane fallback, return what the + * callers will understand as 'no compression happened'. + */ + return -E2BIG; + } +} + +static int compression_decompress_bio(int type, struct list_head *ws, + struct compressed_bio *cb) +{ + switch (type) { + case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb); + case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb); + case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb); + case BTRFS_COMPRESS_NONE: + default: + /* + * This can't happen, the type is validated several times + * before we get here. + */ + BUG(); + } +} + +static int compression_decompress(int type, struct list_head *ws, + unsigned char *data_in, struct page *dest_page, + unsigned long start_byte, size_t srclen, size_t destlen) +{ + switch (type) { + case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page, + start_byte, srclen, destlen); + case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page, + start_byte, srclen, destlen); + case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page, + start_byte, srclen, destlen); + case BTRFS_COMPRESS_NONE: + default: + /* + * This can't happen, the type is validated several times + * before we get here. 
+ */ + BUG(); + } +} + static int btrfs_decompress_bio(struct compressed_bio *cb); static inline int compressed_bio_size(struct btrfs_fs_info *fs_info, @@ -1074,10 +1138,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping, level = btrfs_compress_set_level(type, level); workspace = get_workspace(type, level); - ret = btrfs_compress_op[type]->compress_pages(workspace, mapping, - start, pages, - out_pages, - total_in, total_out); + ret = compression_compress_pages(type, workspace, mapping, start, pages, + out_pages, total_in, total_out); put_workspace(type, workspace); return ret; } @@ -1103,7 +1165,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb) int type = cb->compress_type; workspace = get_workspace(type, 0); - ret = btrfs_compress_op[type]->decompress_bio(workspace, cb); + ret = compression_decompress_bio(type, workspace, cb); put_workspace(type, workspace); return ret; @@ -1121,9 +1183,8 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, int ret; workspace = get_workspace(type, 0); - ret = btrfs_compress_op[type]->decompress(workspace, data_in, - dest_page, start_byte, - srclen, destlen); + ret = compression_decompress(type, workspace, data_in, dest_page, + start_byte, srclen, destlen); put_workspace(type, workspace); return ret; diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 52dce1176f89..7db14d3166b5 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -140,23 +140,6 @@ struct btrfs_compress_op { void (*free_workspace)(struct list_head *workspace); - int (*compress_pages)(struct list_head *workspace, - struct address_space *mapping, - u64 start, - struct page **pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out); - - int (*decompress_bio)(struct list_head *workspace, - struct compressed_bio *cb); - - int (*decompress)(struct list_head *workspace, - unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen); - /* Maximum level supported by the compression algorithm */ unsigned int max_level; unsigned int default_level; diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 04a6815ea9cb..9417944ec829 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -509,9 +509,6 @@ const struct btrfs_compress_op btrfs_lzo_compress = { .put_workspace = lzo_put_workspace, .alloc_workspace = lzo_alloc_workspace, .free_workspace = lzo_free_workspace, - .compress_pages = lzo_compress_pages, - .decompress_bio = lzo_decompress_bio, - .decompress = lzo_decompress, .max_level = 1, .default_level = 1, }; diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 4091f94ba378..8bb6f19ab369 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -420,9 +420,6 @@ const struct btrfs_compress_op btrfs_zlib_compress = { .put_workspace = zlib_put_workspace, .alloc_workspace = zlib_alloc_workspace, .free_workspace = zlib_free_workspace, - .compress_pages = zlib_compress_pages, - .decompress_bio = zlib_decompress_bio, - .decompress = zlib_decompress, .max_level = 9, .default_level = BTRFS_ZLIB_DEFAULT_LEVEL, }; diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index b3728220c329..5f17c741d167 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -713,9 +713,6 @@ const struct btrfs_compress_op btrfs_zstd_compress = { .put_workspace = zstd_put_workspace, .alloc_workspace = zstd_alloc_workspace, .free_workspace = zstd_free_workspace, - .compress_pages = zstd_compress_pages, - .decompress_bio = zstd_decompress_bio, - .decompress = 
zstd_decompress, .max_level = ZSTD_BTRFS_MAX_LEVEL, .default_level = ZSTD_BTRFS_DEFAULT_LEVEL, }; -- cgit v1.2.3-59-g8ed1b From be951045312d963ffeacd6a566a0de87e4784af1 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 2 Oct 2019 00:53:31 +0200 Subject: btrfs: compression: attach workspace manager to the ops There's a lot of indirection when the generic code calls into algo-specific callbacks to reach the private workspace manager structure and back to the generic code. To simplify that, export the workspace manager for heuristic, LZO and ZLIB, while ZSTD is going to use it's own manager. Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 1 + fs/btrfs/compression.h | 1 + fs/btrfs/lzo.c | 1 + fs/btrfs/zlib.c | 1 + fs/btrfs/zstd.c | 2 ++ 5 files changed, 6 insertions(+) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 87bac8f73a99..e650125b1d2b 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -920,6 +920,7 @@ fail: } const struct btrfs_compress_op btrfs_heuristic_compress = { + .workspace_manager = &heuristic_wsm, .init_workspace_manager = heuristic_init_workspace_manager, .cleanup_workspace_manager = heuristic_cleanup_workspace_manager, .get_workspace = heuristic_get_workspace, diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 7db14d3166b5..7091eae063e5 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -140,6 +140,7 @@ struct btrfs_compress_op { void (*free_workspace)(struct list_head *workspace); + struct workspace_manager *workspace_manager; /* Maximum level supported by the compression algorithm */ unsigned int max_level; unsigned int default_level; diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 9417944ec829..aff105cc80e7 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -503,6 +503,7 @@ out: } const struct btrfs_compress_op btrfs_lzo_compress = { + .workspace_manager = &wsm, .init_workspace_manager = lzo_init_workspace_manager, .cleanup_workspace_manager = lzo_cleanup_workspace_manager, .get_workspace = lzo_get_workspace, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 8bb6f19ab369..a5e8f0207473 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -414,6 +414,7 @@ next: } const struct btrfs_compress_op btrfs_zlib_compress = { + .workspace_manager = &wsm, .init_workspace_manager = zlib_init_workspace_manager, .cleanup_workspace_manager = zlib_cleanup_workspace_manager, .get_workspace = zlib_get_workspace, diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 5f17c741d167..4791e89e43e3 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -707,6 +707,8 @@ finish: } const struct btrfs_compress_op btrfs_zstd_compress = { + /* ZSTD uses own workspace manager */ + .workspace_manager = NULL, .init_workspace_manager = zstd_init_workspace_manager, .cleanup_workspace_manager = zstd_cleanup_workspace_manager, .get_workspace = zstd_get_workspace, -- cgit v1.2.3-59-g8ed1b From 975db48330c492a84d8b29b10ae55a30a03dfe0f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 4 Oct 2019 01:40:58 +0200 Subject: btrfs: compression: let workspace manager init take only the type With the access to the workspace structures, we can look it up together with the compression ops inside the workspace manager init helper. 
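The lookup-by-type pattern can be sketched in a few lines of standalone C (hypothetical tables, not the btrfs ones): given only a type index, the generic code reaches both the per-algorithm ops and the workspace manager attached to them.

/* Toy model of attaching a workspace manager to the per-type ops table and
 * looking both up from a single type index. Invented names throughout. */
#include <stdio.h>

struct workspace_manager { int free_ws; };

struct compress_op {
	const char *name;
	struct workspace_manager *workspace_manager;
};

static struct workspace_manager zlib_wsm, lzo_wsm;

static const struct compress_op zlib_op = { "zlib", &zlib_wsm };
static const struct compress_op lzo_op  = { "lzo",  &lzo_wsm  };

static const struct compress_op *const ops[] = { &zlib_op, &lzo_op };

static void init_workspace_manager(int type)
{
	const struct compress_op *op = ops[type];

	op->workspace_manager->free_ws = 1;	/* pretend to pre-allocate one workspace */
	printf("%s manager initialized\n", op->name);
}

int main(void)
{
	init_workspace_manager(0);
	init_workspace_manager(1);
	return 0;
}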
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 7 ++++--- fs/btrfs/compression.h | 3 +-- fs/btrfs/lzo.c | 2 +- fs/btrfs/zlib.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index e650125b1d2b..6adc7f6857d7 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -862,7 +862,7 @@ static struct workspace_manager heuristic_wsm; static void heuristic_init_workspace_manager(void) { - btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress); + btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE); } static void heuristic_cleanup_workspace_manager(void) @@ -937,9 +937,10 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = { &btrfs_zstd_compress, }; -void btrfs_init_workspace_manager(struct workspace_manager *wsm, - const struct btrfs_compress_op *ops) +void btrfs_init_workspace_manager(int type) { + const struct btrfs_compress_op *ops = btrfs_compress_op[type]; + struct workspace_manager *wsm = ops->workspace_manager; struct list_head *workspace; wsm->ops = ops; diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 7091eae063e5..10f82e791769 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -120,8 +120,7 @@ struct workspace_manager { wait_queue_head_t ws_wait; }; -void btrfs_init_workspace_manager(struct workspace_manager *wsm, - const struct btrfs_compress_op *ops); +void btrfs_init_workspace_manager(int type); struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, unsigned int level); void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws); diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index aff105cc80e7..5b8470041bf6 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -65,7 +65,7 @@ static struct workspace_manager wsm; static void lzo_init_workspace_manager(void) { - btrfs_init_workspace_manager(&wsm, &btrfs_lzo_compress); + btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO); } static void lzo_cleanup_workspace_manager(void) diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index a5e8f0207473..be964128dba3 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -31,7 +31,7 @@ static struct workspace_manager wsm; static void zlib_init_workspace_manager(void) { - btrfs_init_workspace_manager(&wsm, &btrfs_zlib_compress); + btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB); } static void zlib_cleanup_workspace_manager(void) -- cgit v1.2.3-59-g8ed1b From d551703347263b7f79168e51c2f999cb883b8d65 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 2 Oct 2019 01:08:03 +0200 Subject: btrfs: compression: inline init_workspace_manager Replace loop calling to all algos with a list of direct calls to the init manager callback. When that becomes trivial it is replaced by direct call to the helper. 
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 17 ++++++----------- fs/btrfs/compression.h | 3 --- fs/btrfs/lzo.c | 6 ------ fs/btrfs/zlib.c | 6 ------ fs/btrfs/zstd.c | 3 +-- 5 files changed, 7 insertions(+), 28 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 6adc7f6857d7..61b9cf095ee5 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -52,6 +52,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb); int zstd_decompress(struct list_head *ws, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen); +void zstd_init_workspace_manager(void); static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; @@ -860,11 +861,6 @@ struct heuristic_ws { static struct workspace_manager heuristic_wsm; -static void heuristic_init_workspace_manager(void) -{ - btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE); -} - static void heuristic_cleanup_workspace_manager(void) { btrfs_cleanup_workspace_manager(&heuristic_wsm); @@ -921,7 +917,6 @@ fail: const struct btrfs_compress_op btrfs_heuristic_compress = { .workspace_manager = &heuristic_wsm, - .init_workspace_manager = heuristic_init_workspace_manager, .cleanup_workspace_manager = heuristic_cleanup_workspace_manager, .get_workspace = heuristic_get_workspace, .put_workspace = heuristic_put_workspace, @@ -937,7 +932,7 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = { &btrfs_zstd_compress, }; -void btrfs_init_workspace_manager(int type) +static void btrfs_init_workspace_manager(int type) { const struct btrfs_compress_op *ops = btrfs_compress_op[type]; struct workspace_manager *wsm = ops->workspace_manager; @@ -1194,10 +1189,10 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, void __init btrfs_init_compress(void) { - int i; - - for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) - btrfs_compress_op[i]->init_workspace_manager(); + btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE); + btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB); + btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO); + zstd_init_workspace_manager(); } void __cold btrfs_exit_compress(void) diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 10f82e791769..12a46139c389 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -120,15 +120,12 @@ struct workspace_manager { wait_queue_head_t ws_wait; }; -void btrfs_init_workspace_manager(int type); struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, unsigned int level); void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws); void btrfs_cleanup_workspace_manager(struct workspace_manager *wsm); struct btrfs_compress_op { - void (*init_workspace_manager)(void); - void (*cleanup_workspace_manager)(void); struct list_head *(*get_workspace)(unsigned int level); diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 5b8470041bf6..a55079477edf 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -63,11 +63,6 @@ struct workspace { static struct workspace_manager wsm; -static void lzo_init_workspace_manager(void) -{ - btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO); -} - static void lzo_cleanup_workspace_manager(void) { btrfs_cleanup_workspace_manager(&wsm); @@ -504,7 +499,6 @@ out: const struct btrfs_compress_op btrfs_lzo_compress = { .workspace_manager = &wsm, - .init_workspace_manager = lzo_init_workspace_manager, .cleanup_workspace_manager 
= lzo_cleanup_workspace_manager, .get_workspace = lzo_get_workspace, .put_workspace = lzo_put_workspace, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index be964128dba3..39f1d0f1b286 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -29,11 +29,6 @@ struct workspace { static struct workspace_manager wsm; -static void zlib_init_workspace_manager(void) -{ - btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB); -} - static void zlib_cleanup_workspace_manager(void) { btrfs_cleanup_workspace_manager(&wsm); @@ -415,7 +410,6 @@ next: const struct btrfs_compress_op btrfs_zlib_compress = { .workspace_manager = &wsm, - .init_workspace_manager = zlib_init_workspace_manager, .cleanup_workspace_manager = zlib_cleanup_workspace_manager, .get_workspace = zlib_get_workspace, .put_workspace = zlib_put_workspace, diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 4791e89e43e3..8e7a804b05ee 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -168,7 +168,7 @@ static void zstd_calc_ws_mem_sizes(void) } } -static void zstd_init_workspace_manager(void) +void zstd_init_workspace_manager(void) { struct list_head *ws; int i; @@ -709,7 +709,6 @@ finish: const struct btrfs_compress_op btrfs_zstd_compress = { /* ZSTD uses own workspace manager */ .workspace_manager = NULL, - .init_workspace_manager = zstd_init_workspace_manager, .cleanup_workspace_manager = zstd_cleanup_workspace_manager, .get_workspace = zstd_get_workspace, .put_workspace = zstd_put_workspace, -- cgit v1.2.3-59-g8ed1b From 2dba714390f1ff7a8a37cc8c3b374d71d3e84af7 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 4 Oct 2019 01:40:58 +0200 Subject: btrfs: compression: let workspace manager cleanup take only the type With the access to the workspace structures, we can look it up together with the compression ops inside the workspace manager cleanup helper. 
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 6 ++++-- fs/btrfs/compression.h | 2 +- fs/btrfs/lzo.c | 2 +- fs/btrfs/zlib.c | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 61b9cf095ee5..6c4dc62b9ab0 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -863,7 +863,7 @@ static struct workspace_manager heuristic_wsm; static void heuristic_cleanup_workspace_manager(void) { - btrfs_cleanup_workspace_manager(&heuristic_wsm); + btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE); } static struct list_head *heuristic_get_workspace(unsigned int level) @@ -960,10 +960,12 @@ static void btrfs_init_workspace_manager(int type) } } -void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman) +void btrfs_cleanup_workspace_manager(int type) { + struct workspace_manager *wsman; struct list_head *ws; + wsman = btrfs_compress_op[type]->workspace_manager; while (!list_empty(&wsman->idle_ws)) { ws = wsman->idle_ws.next; list_del(ws); diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 12a46139c389..0deaa8e03836 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -123,7 +123,7 @@ struct workspace_manager { struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, unsigned int level); void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws); -void btrfs_cleanup_workspace_manager(struct workspace_manager *wsm); +void btrfs_cleanup_workspace_manager(int type); struct btrfs_compress_op { void (*cleanup_workspace_manager)(void); diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index a55079477edf..6aa602040506 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -65,7 +65,7 @@ static struct workspace_manager wsm; static void lzo_cleanup_workspace_manager(void) { - btrfs_cleanup_workspace_manager(&wsm); + btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO); } static struct list_head *lzo_get_workspace(unsigned int level) diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 39f1d0f1b286..7319e9f3d484 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -31,7 +31,7 @@ static struct workspace_manager wsm; static void zlib_cleanup_workspace_manager(void) { - btrfs_cleanup_workspace_manager(&wsm); + btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB); } static struct list_head *zlib_get_workspace(unsigned int level) -- cgit v1.2.3-59-g8ed1b From 2510307e6c78d1d9a59c85164cbff66f8a9f3fed Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 2 Oct 2019 01:08:03 +0200 Subject: btrfs: compression: inline cleanup_workspace_manager Replace loop calling to all algos with a list of direct calls to the cleanup manager callback. When that becomes trivial it is replaced by direct call to the helper. 
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 17 ++++++----------- fs/btrfs/compression.h | 3 --- fs/btrfs/lzo.c | 6 ------ fs/btrfs/zlib.c | 6 ------ fs/btrfs/zstd.c | 3 +-- 5 files changed, 7 insertions(+), 28 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 6c4dc62b9ab0..34921a120829 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -53,6 +53,7 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen); void zstd_init_workspace_manager(void); +void zstd_cleanup_workspace_manager(void); static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; @@ -861,11 +862,6 @@ struct heuristic_ws { static struct workspace_manager heuristic_wsm; -static void heuristic_cleanup_workspace_manager(void) -{ - btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE); -} - static struct list_head *heuristic_get_workspace(unsigned int level) { return btrfs_get_workspace(&heuristic_wsm, level); @@ -917,7 +913,6 @@ fail: const struct btrfs_compress_op btrfs_heuristic_compress = { .workspace_manager = &heuristic_wsm, - .cleanup_workspace_manager = heuristic_cleanup_workspace_manager, .get_workspace = heuristic_get_workspace, .put_workspace = heuristic_put_workspace, .alloc_workspace = alloc_heuristic_ws, @@ -960,7 +955,7 @@ static void btrfs_init_workspace_manager(int type) } } -void btrfs_cleanup_workspace_manager(int type) +static void btrfs_cleanup_workspace_manager(int type) { struct workspace_manager *wsman; struct list_head *ws; @@ -1199,10 +1194,10 @@ void __init btrfs_init_compress(void) void __cold btrfs_exit_compress(void) { - int i; - - for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) - btrfs_compress_op[i]->cleanup_workspace_manager(); + btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE); + btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB); + btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO); + zstd_cleanup_workspace_manager(); } /* diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 0deaa8e03836..cf667e599b70 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -123,11 +123,8 @@ struct workspace_manager { struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, unsigned int level); void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws); -void btrfs_cleanup_workspace_manager(int type); struct btrfs_compress_op { - void (*cleanup_workspace_manager)(void); - struct list_head *(*get_workspace)(unsigned int level); void (*put_workspace)(struct list_head *ws); diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 6aa602040506..6f4619e76c11 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -63,11 +63,6 @@ struct workspace { static struct workspace_manager wsm; -static void lzo_cleanup_workspace_manager(void) -{ - btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO); -} - static struct list_head *lzo_get_workspace(unsigned int level) { return btrfs_get_workspace(&wsm, level); @@ -499,7 +494,6 @@ out: const struct btrfs_compress_op btrfs_lzo_compress = { .workspace_manager = &wsm, - .cleanup_workspace_manager = lzo_cleanup_workspace_manager, .get_workspace = lzo_get_workspace, .put_workspace = lzo_put_workspace, .alloc_workspace = lzo_alloc_workspace, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 7319e9f3d484..03c632c7deac 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -29,11 +29,6 @@ struct 
workspace { static struct workspace_manager wsm; -static void zlib_cleanup_workspace_manager(void) -{ - btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB); -} - static struct list_head *zlib_get_workspace(unsigned int level) { struct list_head *ws = btrfs_get_workspace(&wsm, level); @@ -410,7 +405,6 @@ next: const struct btrfs_compress_op btrfs_zlib_compress = { .workspace_manager = &wsm, - .cleanup_workspace_manager = zlib_cleanup_workspace_manager, .get_workspace = zlib_get_workspace, .put_workspace = zlib_put_workspace, .alloc_workspace = zlib_alloc_workspace, diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 8e7a804b05ee..f575ce77ea3d 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -194,7 +194,7 @@ void zstd_init_workspace_manager(void) } } -static void zstd_cleanup_workspace_manager(void) +void zstd_cleanup_workspace_manager(void) { struct workspace *workspace; int i; @@ -709,7 +709,6 @@ finish: const struct btrfs_compress_op btrfs_zstd_compress = { /* ZSTD uses own workspace manager */ .workspace_manager = NULL, - .cleanup_workspace_manager = zstd_cleanup_workspace_manager, .get_workspace = zstd_get_workspace, .put_workspace = zstd_put_workspace, .alloc_workspace = zstd_alloc_workspace, -- cgit v1.2.3-59-g8ed1b From d20f395f98959dee592cf05af0bec7ab5b185e5e Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 4 Oct 2019 02:21:48 +0200 Subject: btrfs: compression: export alloc/free/get/put callbacks of all algos The indirect calls will be replaced by a switch in compression.c. (Switch is faster than indirect calls with when Spectre mitigations are enabled). Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 12 ++++++++++++ fs/btrfs/lzo.c | 8 ++++---- fs/btrfs/zlib.c | 8 ++++---- fs/btrfs/zstd.c | 13 ++++++------- 4 files changed, 26 insertions(+), 15 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 34921a120829..f707db4a9a53 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -36,6 +36,10 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb); int zlib_decompress(struct list_head *ws, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen); +struct list_head *zlib_alloc_workspace(unsigned int level); +void zlib_free_workspace(struct list_head *ws); +struct list_head *zlib_get_workspace(unsigned int level); +void zlib_put_workspace(struct list_head *ws); int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, u64 start, struct page **pages, unsigned long *out_pages, @@ -44,6 +48,10 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb); int lzo_decompress(struct list_head *ws, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen); +struct list_head *lzo_alloc_workspace(unsigned int level); +void lzo_free_workspace(struct list_head *ws); +struct list_head *lzo_get_workspace(unsigned int level); +void lzo_put_workspace(struct list_head *ws); int zstd_compress_pages(struct list_head *ws, struct address_space *mapping, u64 start, struct page **pages, unsigned long *out_pages, @@ -54,6 +62,10 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in, size_t destlen); void zstd_init_workspace_manager(void); void zstd_cleanup_workspace_manager(void); +struct list_head *zstd_alloc_workspace(unsigned int level); +void zstd_free_workspace(struct list_head *ws); +struct list_head 
*zstd_get_workspace(unsigned int level); +void zstd_put_workspace(struct list_head *ws); static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 6f4619e76c11..18018619c019 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -63,17 +63,17 @@ struct workspace { static struct workspace_manager wsm; -static struct list_head *lzo_get_workspace(unsigned int level) +struct list_head *lzo_get_workspace(unsigned int level) { return btrfs_get_workspace(&wsm, level); } -static void lzo_put_workspace(struct list_head *ws) +void lzo_put_workspace(struct list_head *ws) { btrfs_put_workspace(&wsm, ws); } -static void lzo_free_workspace(struct list_head *ws) +void lzo_free_workspace(struct list_head *ws) { struct workspace *workspace = list_entry(ws, struct workspace, list); @@ -83,7 +83,7 @@ static void lzo_free_workspace(struct list_head *ws) kfree(workspace); } -static struct list_head *lzo_alloc_workspace(unsigned int level) +struct list_head *lzo_alloc_workspace(unsigned int level) { struct workspace *workspace; diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 03c632c7deac..f2a56e999e5f 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -29,7 +29,7 @@ struct workspace { static struct workspace_manager wsm; -static struct list_head *zlib_get_workspace(unsigned int level) +struct list_head *zlib_get_workspace(unsigned int level) { struct list_head *ws = btrfs_get_workspace(&wsm, level); struct workspace *workspace = list_entry(ws, struct workspace, list); @@ -39,12 +39,12 @@ static struct list_head *zlib_get_workspace(unsigned int level) return ws; } -static void zlib_put_workspace(struct list_head *ws) +void zlib_put_workspace(struct list_head *ws) { btrfs_put_workspace(&wsm, ws); } -static void zlib_free_workspace(struct list_head *ws) +void zlib_free_workspace(struct list_head *ws) { struct workspace *workspace = list_entry(ws, struct workspace, list); @@ -53,7 +53,7 @@ static void zlib_free_workspace(struct list_head *ws) kfree(workspace); } -static struct list_head *zlib_alloc_workspace(unsigned int level) +struct list_head *zlib_alloc_workspace(unsigned int level) { struct workspace *workspace; int workspacesize; diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index f575ce77ea3d..2caf08e06e2f 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -91,9 +91,8 @@ static inline struct workspace *list_to_workspace(struct list_head *list) return container_of(list, struct workspace, list); } -static void zstd_free_workspace(struct list_head *ws); -static struct list_head *zstd_alloc_workspace(unsigned int level); - +void zstd_free_workspace(struct list_head *ws); +struct list_head *zstd_alloc_workspace(unsigned int level); /* * zstd_reclaim_timer_fn - reclaim timer * @t: timer @@ -261,7 +260,7 @@ static struct list_head *zstd_find_workspace(unsigned int level) * attempt to allocate a new workspace. If we fail to allocate one due to * memory pressure, go to sleep waiting for the max level workspace to free up. */ -static struct list_head *zstd_get_workspace(unsigned int level) +struct list_head *zstd_get_workspace(unsigned int level) { struct list_head *ws; unsigned int nofs_flag; @@ -302,7 +301,7 @@ again: * isn't set, it is also set here. Only the max level workspace tries and wakes * up waiting workspaces. 
*/ -static void zstd_put_workspace(struct list_head *ws) +void zstd_put_workspace(struct list_head *ws) { struct workspace *workspace = list_to_workspace(ws); @@ -332,7 +331,7 @@ static void zstd_put_workspace(struct list_head *ws) cond_wake_up(&wsm.wait); } -static void zstd_free_workspace(struct list_head *ws) +void zstd_free_workspace(struct list_head *ws) { struct workspace *workspace = list_entry(ws, struct workspace, list); @@ -341,7 +340,7 @@ static void zstd_free_workspace(struct list_head *ws) kfree(workspace); } -static struct list_head *zstd_alloc_workspace(unsigned int level) +struct list_head *zstd_alloc_workspace(unsigned int level) { struct workspace *workspace; -- cgit v1.2.3-59-g8ed1b From 6a0d12724bd2dd1c766769578e221ce1d10a4656 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 4 Oct 2019 02:36:16 +0200 Subject: btrfs: compression: inline get_workspace Majority of the callbacks is trivial, we don't gain anything by the indirection, so replace them by a switch function. ZLIB needs to adjust level in the callback and ZSTD workspace management is complex, the rest is call to the helper. Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 23 +++++++++++++++-------- fs/btrfs/compression.h | 2 -- fs/btrfs/lzo.c | 6 ------ fs/btrfs/zlib.c | 1 - fs/btrfs/zstd.c | 1 - 5 files changed, 15 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index f707db4a9a53..de9b06574f42 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -50,7 +50,6 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in, size_t destlen); struct list_head *lzo_alloc_workspace(unsigned int level); void lzo_free_workspace(struct list_head *ws); -struct list_head *lzo_get_workspace(unsigned int level); void lzo_put_workspace(struct list_head *ws); int zstd_compress_pages(struct list_head *ws, struct address_space *mapping, @@ -874,11 +873,6 @@ struct heuristic_ws { static struct workspace_manager heuristic_wsm; -static struct list_head *heuristic_get_workspace(unsigned int level) -{ - return btrfs_get_workspace(&heuristic_wsm, level); -} - static void heuristic_put_workspace(struct list_head *ws) { btrfs_put_workspace(&heuristic_wsm, ws); @@ -925,7 +919,6 @@ fail: const struct btrfs_compress_op btrfs_heuristic_compress = { .workspace_manager = &heuristic_wsm, - .get_workspace = heuristic_get_workspace, .put_workspace = heuristic_put_workspace, .alloc_workspace = alloc_heuristic_ws, .free_workspace = free_heuristic_ws, @@ -1067,7 +1060,21 @@ again: static struct list_head *get_workspace(int type, int level) { - return btrfs_compress_op[type]->get_workspace(level); + struct workspace_manager *wsm; + + wsm = btrfs_compress_op[type]->workspace_manager; + switch (type) { + case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(wsm, level); + case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level); + case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(wsm, level); + case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level); + default: + /* + * This can't happen, the type is validated several times + * before we get here. 
+ */ + BUG(); + } } /* diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index cf667e599b70..df7274c1a5d8 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -125,8 +125,6 @@ struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws); struct btrfs_compress_op { - struct list_head *(*get_workspace)(unsigned int level); - void (*put_workspace)(struct list_head *ws); struct list_head *(*alloc_workspace)(unsigned int level); diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 18018619c019..821e5c137971 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -63,11 +63,6 @@ struct workspace { static struct workspace_manager wsm; -struct list_head *lzo_get_workspace(unsigned int level) -{ - return btrfs_get_workspace(&wsm, level); -} - void lzo_put_workspace(struct list_head *ws) { btrfs_put_workspace(&wsm, ws); @@ -494,7 +489,6 @@ out: const struct btrfs_compress_op btrfs_lzo_compress = { .workspace_manager = &wsm, - .get_workspace = lzo_get_workspace, .put_workspace = lzo_put_workspace, .alloc_workspace = lzo_alloc_workspace, .free_workspace = lzo_free_workspace, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index f2a56e999e5f..7caa468efe94 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -405,7 +405,6 @@ next: const struct btrfs_compress_op btrfs_zlib_compress = { .workspace_manager = &wsm, - .get_workspace = zlib_get_workspace, .put_workspace = zlib_put_workspace, .alloc_workspace = zlib_alloc_workspace, .free_workspace = zlib_free_workspace, diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 2caf08e06e2f..c9fe0e2bd107 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -708,7 +708,6 @@ finish: const struct btrfs_compress_op btrfs_zstd_compress = { /* ZSTD uses own workspace manager */ .workspace_manager = NULL, - .get_workspace = zstd_get_workspace, .put_workspace = zstd_put_workspace, .alloc_workspace = zstd_alloc_workspace, .free_workspace = zstd_free_workspace, -- cgit v1.2.3-59-g8ed1b From bd3a5287cc20e37f1e365be1f742b6c574e3f83c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 4 Oct 2019 02:42:03 +0200 Subject: btrfs: compression: inline put_workspace Similar to get_workspace, majority of the callbacks is trivial, we don't gain anything by the indirection, so replace them by a switch function. Trivial callback implementations use the helper. 
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 24 +++++++++++++++--------- fs/btrfs/compression.h | 2 -- fs/btrfs/lzo.c | 6 ------ fs/btrfs/zlib.c | 6 ------ fs/btrfs/zstd.c | 1 - 5 files changed, 15 insertions(+), 24 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index de9b06574f42..9b9638557e19 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -39,7 +39,6 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in, struct list_head *zlib_alloc_workspace(unsigned int level); void zlib_free_workspace(struct list_head *ws); struct list_head *zlib_get_workspace(unsigned int level); -void zlib_put_workspace(struct list_head *ws); int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, u64 start, struct page **pages, unsigned long *out_pages, @@ -50,7 +49,6 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in, size_t destlen); struct list_head *lzo_alloc_workspace(unsigned int level); void lzo_free_workspace(struct list_head *ws); -void lzo_put_workspace(struct list_head *ws); int zstd_compress_pages(struct list_head *ws, struct address_space *mapping, u64 start, struct page **pages, unsigned long *out_pages, @@ -873,11 +871,6 @@ struct heuristic_ws { static struct workspace_manager heuristic_wsm; -static void heuristic_put_workspace(struct list_head *ws) -{ - btrfs_put_workspace(&heuristic_wsm, ws); -} - static void free_heuristic_ws(struct list_head *ws) { struct heuristic_ws *workspace; @@ -919,7 +912,6 @@ fail: const struct btrfs_compress_op btrfs_heuristic_compress = { .workspace_manager = &heuristic_wsm, - .put_workspace = heuristic_put_workspace, .alloc_workspace = alloc_heuristic_ws, .free_workspace = free_heuristic_ws, }; @@ -1112,7 +1104,21 @@ wake: static void put_workspace(int type, struct list_head *ws) { - return btrfs_compress_op[type]->put_workspace(ws); + struct workspace_manager *wsm; + + wsm = btrfs_compress_op[type]->workspace_manager; + switch (type) { + case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(wsm, ws); + case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(wsm, ws); + case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(wsm, ws); + case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws); + default: + /* + * This can't happen, the type is validated several times + * before we get here. 
+ */ + BUG(); + } } /* diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index df7274c1a5d8..b8ed97fc6db4 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -125,8 +125,6 @@ struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws); struct btrfs_compress_op { - void (*put_workspace)(struct list_head *ws); - struct list_head *(*alloc_workspace)(unsigned int level); void (*free_workspace)(struct list_head *workspace); diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 821e5c137971..bbf917c5ff2d 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -63,11 +63,6 @@ struct workspace { static struct workspace_manager wsm; -void lzo_put_workspace(struct list_head *ws) -{ - btrfs_put_workspace(&wsm, ws); -} - void lzo_free_workspace(struct list_head *ws) { struct workspace *workspace = list_entry(ws, struct workspace, list); @@ -489,7 +484,6 @@ out: const struct btrfs_compress_op btrfs_lzo_compress = { .workspace_manager = &wsm, - .put_workspace = lzo_put_workspace, .alloc_workspace = lzo_alloc_workspace, .free_workspace = lzo_free_workspace, .max_level = 1, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 7caa468efe94..610765640c8e 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -39,11 +39,6 @@ struct list_head *zlib_get_workspace(unsigned int level) return ws; } -void zlib_put_workspace(struct list_head *ws) -{ - btrfs_put_workspace(&wsm, ws); -} - void zlib_free_workspace(struct list_head *ws) { struct workspace *workspace = list_entry(ws, struct workspace, list); @@ -405,7 +400,6 @@ next: const struct btrfs_compress_op btrfs_zlib_compress = { .workspace_manager = &wsm, - .put_workspace = zlib_put_workspace, .alloc_workspace = zlib_alloc_workspace, .free_workspace = zlib_free_workspace, .max_level = 9, diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index c9fe0e2bd107..a346f1187fae 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -708,7 +708,6 @@ finish: const struct btrfs_compress_op btrfs_zstd_compress = { /* ZSTD uses own workspace manager */ .workspace_manager = NULL, - .put_workspace = zstd_put_workspace, .alloc_workspace = zstd_alloc_workspace, .free_workspace = zstd_free_workspace, .max_level = ZSTD_BTRFS_MAX_LEVEL, -- cgit v1.2.3-59-g8ed1b From 5907a9bb13cea3bbf0c54d6a9a1ccf5edebedeed Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 4 Oct 2019 02:50:28 +0200 Subject: btrfs: compression: pass type to btrfs_get_workspace We can infer the workspace_manager from type and the type will be used in the following patch to call a common helper for alloc_workspace. Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 12 +++++------- fs/btrfs/compression.h | 3 +-- fs/btrfs/zlib.c | 2 +- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 9b9638557e19..ffc94e15d86e 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -972,9 +972,9 @@ static void btrfs_cleanup_workspace_manager(int type) * Preallocation makes a forward progress guarantees and we do not return * errors. 
*/ -struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, - unsigned int level) +struct list_head *btrfs_get_workspace(int type, unsigned int level) { + struct workspace_manager *wsm; struct list_head *workspace; int cpus = num_online_cpus(); unsigned nofs_flag; @@ -984,6 +984,7 @@ struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, wait_queue_head_t *ws_wait; int *free_ws; + wsm = btrfs_compress_op[type]->workspace_manager; idle_ws = &wsm->idle_ws; ws_lock = &wsm->ws_lock; total_ws = &wsm->total_ws; @@ -1052,13 +1053,10 @@ again: static struct list_head *get_workspace(int type, int level) { - struct workspace_manager *wsm; - - wsm = btrfs_compress_op[type]->workspace_manager; switch (type) { - case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(wsm, level); + case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level); case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level); - case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(wsm, level); + case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level); case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level); default: /* diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index b8ed97fc6db4..accb1d61df87 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -120,8 +120,7 @@ struct workspace_manager { wait_queue_head_t ws_wait; }; -struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, - unsigned int level); +struct list_head *btrfs_get_workspace(int type, unsigned int level); void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws); struct btrfs_compress_op { diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 610765640c8e..5679a2e41a52 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -31,7 +31,7 @@ static struct workspace_manager wsm; struct list_head *zlib_get_workspace(unsigned int level) { - struct list_head *ws = btrfs_get_workspace(&wsm, level); + struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level); struct workspace *workspace = list_entry(ws, struct workspace, list); workspace->level = level; -- cgit v1.2.3-59-g8ed1b From c778df140644142fb7e12b7e468b137721d85890 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 4 Oct 2019 02:47:39 +0200 Subject: btrfs: compression: inline alloc_workspace Replace indirect calls to alloc_workspace by switch and calls to the specific callbacks. This is mainly to get rid of the indirection due to spectre vulnerability mitigations. 
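For reference, the shape of the switch-based helper, shown here as a plain sketch reconstructed from the diff below (not part of the original commit message), is roughly:

    static struct list_head *alloc_workspace(int type, unsigned int level)
    {
            switch (type) {
            case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
            case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
            case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
            case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
            default:
                    /* The type is validated several times before we get here. */
                    BUG();
            }
    }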
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 21 ++++++++++++++++++--- fs/btrfs/compression.h | 2 -- fs/btrfs/lzo.c | 1 - fs/btrfs/zlib.c | 1 - fs/btrfs/zstd.c | 1 - 5 files changed, 18 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index ffc94e15d86e..4a8dab961f88 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -912,7 +912,6 @@ fail: const struct btrfs_compress_op btrfs_heuristic_compress = { .workspace_manager = &heuristic_wsm, - .alloc_workspace = alloc_heuristic_ws, .free_workspace = free_heuristic_ws, }; @@ -924,6 +923,22 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = { &btrfs_zstd_compress, }; +static struct list_head *alloc_workspace(int type, unsigned int level) +{ + switch (type) { + case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level); + case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level); + case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level); + case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level); + default: + /* + * This can't happen, the type is validated several times + * before we get here. + */ + BUG(); + } +} + static void btrfs_init_workspace_manager(int type) { const struct btrfs_compress_op *ops = btrfs_compress_op[type]; @@ -941,7 +956,7 @@ static void btrfs_init_workspace_manager(int type) * Preallocate one workspace for each compression type so we can * guarantee forward progress in the worst case */ - workspace = wsm->ops->alloc_workspace(0); + workspace = alloc_workspace(type, 0); if (IS_ERR(workspace)) { pr_warn( "BTRFS: cannot preallocate compression workspace, will try later\n"); @@ -1020,7 +1035,7 @@ again: * context of btrfs_compress_bio/btrfs_compress_pages */ nofs_flag = memalloc_nofs_save(); - workspace = wsm->ops->alloc_workspace(level); + workspace = alloc_workspace(type, level); memalloc_nofs_restore(nofs_flag); if (IS_ERR(workspace)) { diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index accb1d61df87..8336c2ef6b5a 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -124,8 +124,6 @@ struct list_head *btrfs_get_workspace(int type, unsigned int level); void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws); struct btrfs_compress_op { - struct list_head *(*alloc_workspace)(unsigned int level); - void (*free_workspace)(struct list_head *workspace); struct workspace_manager *workspace_manager; diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index bbf917c5ff2d..39b2cf80f77c 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -484,7 +484,6 @@ out: const struct btrfs_compress_op btrfs_lzo_compress = { .workspace_manager = &wsm, - .alloc_workspace = lzo_alloc_workspace, .free_workspace = lzo_free_workspace, .max_level = 1, .default_level = 1, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 5679a2e41a52..c5dfab3ab082 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -400,7 +400,6 @@ next: const struct btrfs_compress_op btrfs_zlib_compress = { .workspace_manager = &wsm, - .alloc_workspace = zlib_alloc_workspace, .free_workspace = zlib_free_workspace, .max_level = 9, .default_level = BTRFS_ZLIB_DEFAULT_LEVEL, diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index a346f1187fae..9413f741c2f6 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -708,7 +708,6 @@ finish: const struct btrfs_compress_op btrfs_zstd_compress = { /* ZSTD uses own workspace manager */ .workspace_manager = NULL, - .alloc_workspace = 
zstd_alloc_workspace, .free_workspace = zstd_free_workspace, .max_level = ZSTD_BTRFS_MAX_LEVEL, .default_level = ZSTD_BTRFS_DEFAULT_LEVEL, -- cgit v1.2.3-59-g8ed1b From a3bbd2a9ee3c08b60b3127843866bdc76757c24f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 4 Oct 2019 02:50:28 +0200 Subject: btrfs: compression: pass type to btrfs_put_workspace We can infer the workspace_manager from type and the type will be used in the following patch to call a common helper for free_workspace. Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 13 ++++++------- fs/btrfs/compression.h | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 4a8dab961f88..2a77c91c194b 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -1086,14 +1086,16 @@ static struct list_head *get_workspace(int type, int level) * put a workspace struct back on the list or free it if we have enough * idle ones sitting around */ -void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws) +void btrfs_put_workspace(int type, struct list_head *ws) { + struct workspace_manager *wsm; struct list_head *idle_ws; spinlock_t *ws_lock; atomic_t *total_ws; wait_queue_head_t *ws_wait; int *free_ws; + wsm = btrfs_compress_op[type]->workspace_manager; idle_ws = &wsm->idle_ws; ws_lock = &wsm->ws_lock; total_ws = &wsm->total_ws; @@ -1117,13 +1119,10 @@ wake: static void put_workspace(int type, struct list_head *ws) { - struct workspace_manager *wsm; - - wsm = btrfs_compress_op[type]->workspace_manager; switch (type) { - case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(wsm, ws); - case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(wsm, ws); - case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(wsm, ws); + case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws); + case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws); + case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws); case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws); default: /* diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 8336c2ef6b5a..664b029cd5e4 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -121,7 +121,7 @@ struct workspace_manager { }; struct list_head *btrfs_get_workspace(int type, unsigned int level); -void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws); +void btrfs_put_workspace(int type, struct list_head *ws); struct btrfs_compress_op { void (*free_workspace)(struct list_head *workspace); -- cgit v1.2.3-59-g8ed1b From 1e002351605db4f2fc4114cbb973d5aead72d006 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 4 Oct 2019 02:57:22 +0200 Subject: btrfs: compression: inline free_workspace Replace indirect calls to free_workspace by switch and calls to the specific callbacks. This is mainly to get rid of the indirection due to spectre vulnerability mitigations. 
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 21 ++++++++++++++++++--- fs/btrfs/compression.h | 2 -- fs/btrfs/lzo.c | 1 - fs/btrfs/zlib.c | 1 - fs/btrfs/zstd.c | 1 - 5 files changed, 18 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 2a77c91c194b..b2342f99b093 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -912,7 +912,6 @@ fail: const struct btrfs_compress_op btrfs_heuristic_compress = { .workspace_manager = &heuristic_wsm, - .free_workspace = free_heuristic_ws, }; static const struct btrfs_compress_op * const btrfs_compress_op[] = { @@ -939,6 +938,22 @@ static struct list_head *alloc_workspace(int type, unsigned int level) } } +static void free_workspace(int type, struct list_head *ws) +{ + switch (type) { + case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws); + case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws); + case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws); + case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws); + default: + /* + * This can't happen, the type is validated several times + * before we get here. + */ + BUG(); + } +} + static void btrfs_init_workspace_manager(int type) { const struct btrfs_compress_op *ops = btrfs_compress_op[type]; @@ -976,7 +991,7 @@ static void btrfs_cleanup_workspace_manager(int type) while (!list_empty(&wsman->idle_ws)) { ws = wsman->idle_ws.next; list_del(ws); - wsman->ops->free_workspace(ws); + free_workspace(type, ws); atomic_dec(&wsman->total_ws); } } @@ -1111,7 +1126,7 @@ void btrfs_put_workspace(int type, struct list_head *ws) } spin_unlock(ws_lock); - wsm->ops->free_workspace(ws); + free_workspace(type, ws); atomic_dec(total_ws); wake: cond_wake_up(ws_wait); diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 664b029cd5e4..14057498dcbb 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -124,8 +124,6 @@ struct list_head *btrfs_get_workspace(int type, unsigned int level); void btrfs_put_workspace(int type, struct list_head *ws); struct btrfs_compress_op { - void (*free_workspace)(struct list_head *workspace); - struct workspace_manager *workspace_manager; /* Maximum level supported by the compression algorithm */ unsigned int max_level; diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 39b2cf80f77c..aa9cd11f4b78 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -484,7 +484,6 @@ out: const struct btrfs_compress_op btrfs_lzo_compress = { .workspace_manager = &wsm, - .free_workspace = lzo_free_workspace, .max_level = 1, .default_level = 1, }; diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index c5dfab3ab082..a6c90a003c12 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -400,7 +400,6 @@ next: const struct btrfs_compress_op btrfs_zlib_compress = { .workspace_manager = &wsm, - .free_workspace = zlib_free_workspace, .max_level = 9, .default_level = BTRFS_ZLIB_DEFAULT_LEVEL, }; diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 9413f741c2f6..9a4871636c6c 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -708,7 +708,6 @@ finish: const struct btrfs_compress_op btrfs_zstd_compress = { /* ZSTD uses own workspace manager */ .workspace_manager = NULL, - .free_workspace = zstd_free_workspace, .max_level = ZSTD_BTRFS_MAX_LEVEL, .default_level = ZSTD_BTRFS_DEFAULT_LEVEL, }; -- cgit v1.2.3-59-g8ed1b From 0cf252131361cdeebb7dfc88dd8ec825fff0956a Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 4 Oct 2019 03:09:55 +0200 Subject: btrfs: compression: 
remove ops pointer from workspace_manager We can infer the ops from the type that is now passed to all functions that would need it. This makes workspace_manager::ops redundant, so it can be removed. Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/compression.c | 6 ++---- fs/btrfs/compression.h | 1 - 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index b2342f99b093..53aee0db9d71 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -956,12 +956,10 @@ static void free_workspace(int type, struct list_head *ws) static void btrfs_init_workspace_manager(int type) { - const struct btrfs_compress_op *ops = btrfs_compress_op[type]; - struct workspace_manager *wsm = ops->workspace_manager; + struct workspace_manager *wsm; struct list_head *workspace; - wsm->ops = ops; - + wsm = btrfs_compress_op[type]->workspace_manager; INIT_LIST_HEAD(&wsm->idle_ws); spin_lock_init(&wsm->ws_lock); atomic_set(&wsm->total_ws, 0); diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 14057498dcbb..d253f7aa8ed5 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -109,7 +109,6 @@ enum btrfs_compression_type { }; struct workspace_manager { - const struct btrfs_compress_op *ops; struct list_head idle_ws; spinlock_t ws_lock; /* Number of free workspaces */ -- cgit v1.2.3-59-g8ed1b From d79b7c26b12279038b8b84bfcbe7bb3e53382b3f Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 27 Sep 2019 13:23:16 +0300 Subject: btrfs: Speed up btrfs_file_llseek Modifying the file position is done on a per-file basis. This renders holding the inode lock for writing useless and makes the performance of concurrent llseeks abysmal. Fix this by holding the inode lock for reading. This provides protection against concurrent truncates, and find_desired_extent already includes proper extent locking for the range, which ensures proper locking against concurrent writes. SEEK_CUR and SEEK_END can be done locklessly. The former is synchronized by the file::f_lock spinlock. SEEK_END is not synchronized but is atomic, and that's OK since there is no guarantee that SEEK_END will always be at the end of the file in the face of tail modifications. This change brings ~82% performance improvement when doing a lot of parallel fseeks.
The workload essentially does: for (d=0; d Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/file.c | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 91c98a6d1408..35cfc738f6b7 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3356,13 +3356,14 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_map *em = NULL; struct extent_state *cached_state = NULL; + loff_t i_size = inode->i_size; u64 lockstart; u64 lockend; u64 start; u64 len; int ret = 0; - if (inode->i_size == 0) + if (i_size == 0 || *offset >= i_size) return -ENXIO; /* @@ -3372,8 +3373,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) start = max_t(loff_t, 0, *offset); lockstart = round_down(start, fs_info->sectorsize); - lockend = round_up(i_size_read(inode), - fs_info->sectorsize); + lockend = round_up(i_size, fs_info->sectorsize); if (lockend <= lockstart) lockend = lockstart + fs_info->sectorsize; lockend--; @@ -3382,7 +3382,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, &cached_state); - while (start < inode->i_size) { + while (start < i_size) { em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len); if (IS_ERR(em)) { ret = PTR_ERR(em); @@ -3406,10 +3406,10 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) } free_extent_map(em); if (!ret) { - if (whence == SEEK_DATA && start >= inode->i_size) + if (whence == SEEK_DATA && start >= i_size) ret = -ENXIO; else - *offset = min_t(loff_t, start, inode->i_size); + *offset = min_t(loff_t, start, i_size); } unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, &cached_state); @@ -3421,7 +3421,6 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) struct inode *inode = file->f_mapping->host; int ret; - inode_lock(inode); switch (whence) { case SEEK_END: case SEEK_CUR: @@ -3429,21 +3428,16 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) goto out; case SEEK_DATA: case SEEK_HOLE: - if (offset >= i_size_read(inode)) { - inode_unlock(inode); - return -ENXIO; - } - + inode_lock_shared(inode); ret = find_desired_extent(inode, &offset, whence); - if (ret) { - inode_unlock(inode); + inode_unlock_shared(inode); + + if (ret) return ret; - } } offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); out: - inode_unlock(inode); return offset; } -- cgit v1.2.3-59-g8ed1b From 2034f3b470ccfd4103737cf4d2a25a52ce9ad002 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 27 Sep 2019 13:23:17 +0300 Subject: btrfs: Simplify btrfs_file_llseek Handle SEEK_END/SEEK_CUR in a single 'default' case by directly returning from generic_file_llseek. This makes the 'out' label redundant. Finally, directly return the value from vfs_setpos. No semantic changes.
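A plain sketch of the resulting function, reconstructed from the diff below for readability (not part of the original commit message), looks roughly like:

    static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
    {
            struct inode *inode = file->f_mapping->host;
            int ret;

            switch (whence) {
            default:
                    return generic_file_llseek(file, offset, whence);
            case SEEK_DATA:
            case SEEK_HOLE:
                    /* Shared inode lock is enough; extent locking protects the range. */
                    inode_lock_shared(inode);
                    ret = find_desired_extent(inode, &offset, whence);
                    inode_unlock_shared(inode);
                    if (ret)
                            return ret;
            }

            return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
    }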
Reviewed-by: Johannes Thumshirn Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/file.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 35cfc738f6b7..949212289c89 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3422,10 +3422,8 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) int ret; switch (whence) { - case SEEK_END: - case SEEK_CUR: - offset = generic_file_llseek(file, offset, whence); - goto out; + default: + return generic_file_llseek(file, offset, whence); case SEEK_DATA: case SEEK_HOLE: inode_lock_shared(inode); @@ -3436,9 +3434,7 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) return ret; } - offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); -out: - return offset; + return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); } static int btrfs_file_open(struct inode *inode, struct file *filp) -- cgit v1.2.3-59-g8ed1b From bc80230e0e7bc779cd553c2326ea5b3a5bac303b Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 27 Sep 2019 13:23:18 +0300 Subject: btrfs: Return offset from find_desired_extent Instead of using an input pointer parameter as the return value and having an int as the return type of find_desired_extent, rework the function to directly return the found offset. Doing that, the 'ret' variable in btrfs_llseek_file can be removed. An additional (subjective) benefit is that btrfs' llseek function now resembles those of the other major filesystems. Reviewed-by: Johannes Thumshirn Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/file.c | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 949212289c89..32e620981485 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3351,7 +3351,8 @@ out: return ret; } -static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) +static loff_t find_desired_extent(struct inode *inode, loff_t offset, + int whence) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_map *em = NULL; @@ -3363,14 +3364,14 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) u64 len; int ret = 0; - if (i_size == 0 || *offset >= i_size) + if (i_size == 0 || offset >= i_size) return -ENXIO; /* - * *offset can be negative, in this case we start finding DATA/HOLE from + * offset can be negative, in this case we start finding DATA/HOLE from + * the very start of the file.
*/ - start = max_t(loff_t, 0, *offset); + start = max_t(loff_t, 0, offset); lockstart = round_down(start, fs_info->sectorsize); lockend = round_up(i_size, fs_info->sectorsize); @@ -3405,21 +3406,23 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) cond_resched(); } free_extent_map(em); - if (!ret) { + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, + &cached_state); + if (ret) { + offset = ret; + } else { if (whence == SEEK_DATA && start >= i_size) - ret = -ENXIO; + offset = -ENXIO; else - *offset = min_t(loff_t, start, i_size); + offset = min_t(loff_t, start, i_size); } - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, - &cached_state); - return ret; + + return offset; } static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; - int ret; switch (whence) { default: @@ -3427,13 +3430,14 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) case SEEK_DATA: case SEEK_HOLE: inode_lock_shared(inode); - ret = find_desired_extent(inode, &offset, whence); + offset = find_desired_extent(inode, offset, whence); inode_unlock_shared(inode); - - if (ret) - return ret; + break; } + if (offset < 0) + return offset; + return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); } -- cgit v1.2.3-59-g8ed1b From 94c3f6c6b804c85c91aae0494c205ec7c81ce8d1 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 17 Oct 2019 13:28:55 +0200 Subject: btrfs: tracepoints: drop typecasts from printk Remove typecasts from trace printk, adjust types and move typecast to the assignment if necessary. When assigning, the types are more obvious compared to matching the variables to the format strings. Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 8ca7401bc2fb..671bf866a459 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -170,7 +170,7 @@ DECLARE_EVENT_CLASS(btrfs__inode, TP_STRUCT__entry_btrfs( __field( u64, ino ) - __field( blkcnt_t, blocks ) + __field( u64, blocks ) __field( u64, disk_i_size ) __field( u64, generation ) __field( u64, last_trans ) @@ -194,7 +194,7 @@ DECLARE_EVENT_CLASS(btrfs__inode, show_root_type(__entry->root_objectid), __entry->generation, __entry->ino, - (unsigned long long)__entry->blocks, + __entry->blocks, __entry->disk_i_size, __entry->last_trans, __entry->logged_trans) @@ -574,7 +574,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage, __field( char, for_kupdate ) __field( char, for_reclaim ) __field( char, range_cyclic ) - __field( pgoff_t, writeback_index ) + __field( unsigned long, writeback_index ) __field( u64, root_objectid ) ), @@ -603,7 +603,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage, __entry->range_start, __entry->range_end, __entry->for_kupdate, __entry->for_reclaim, __entry->range_cyclic, - (unsigned long)__entry->writeback_index) + __entry->writeback_index) ); DEFINE_EVENT(btrfs__writepage, __extent_writepage, @@ -622,7 +622,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook, TP_STRUCT__entry_btrfs( __field( u64, ino ) - __field( pgoff_t, index ) + __field( unsigned long, index ) __field( u64, start ) __field( u64, end ) __field( int, uptodate ) @@ -642,7 +642,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook, TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu " "end=%llu uptodate=%d", show_root_type(__entry->root_objectid), - 
__entry->ino, (unsigned long)__entry->index, + __entry->ino, __entry->index, __entry->start, __entry->end, __entry->uptodate) ); @@ -1325,17 +1325,17 @@ TRACE_EVENT(alloc_extent_state, TP_STRUCT__entry( __field(const struct extent_state *, state) __field(gfp_t, mask) - __field(unsigned long, ip) + __field(const void*, ip) ), TP_fast_assign( __entry->state = state, __entry->mask = mask, - __entry->ip = IP + __entry->ip = (const void *)IP ), TP_printk("state=%p mask=%s caller=%pS", __entry->state, - show_gfp_flags(__entry->mask), (const void *)__entry->ip) + show_gfp_flags(__entry->mask), __entry->ip) ); TRACE_EVENT(free_extent_state, @@ -1346,16 +1346,15 @@ TRACE_EVENT(free_extent_state, TP_STRUCT__entry( __field(const struct extent_state *, state) - __field(unsigned long, ip) + __field(const void*, ip) ), TP_fast_assign( __entry->state = state, - __entry->ip = IP + __entry->ip = (const void *)IP ), - TP_printk("state=%p caller=%pS", __entry->state, - (const void *)__entry->ip) + TP_printk("state=%p caller=%pS", __entry->state, __entry->ip) ); DECLARE_EVENT_CLASS(btrfs__work, @@ -1567,8 +1566,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent, ), TP_printk_btrfs("bytenr=%llu num_bytes=%llu", - (unsigned long long)__entry->bytenr, - (unsigned long long)__entry->num_bytes) + __entry->bytenr, __entry->num_bytes) ); DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents, -- cgit v1.2.3-59-g8ed1b From 1d2e7c7c3ed73cc510a4dc093df2a935092ff5ad Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 17 Oct 2019 13:28:57 +0200 Subject: btrfs: tracepoints: constify all pointers We don't modify the data passed to tracepoints, some of the declarations are already const, add it to the rest. Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 52 ++++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 671bf866a459..522d1f2b13e3 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -292,7 +292,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, TRACE_EVENT(btrfs_handle_em_exist, - TP_PROTO(struct btrfs_fs_info *fs_info, + TP_PROTO(const struct btrfs_fs_info *fs_info, const struct extent_map *existing, const struct extent_map *map, u64 start, u64 len), @@ -330,8 +330,8 @@ TRACE_EVENT(btrfs_handle_em_exist, /* file extent item */ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, u64 start), TP_ARGS(bi, l, fi, start), @@ -385,8 +385,8 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular, DECLARE_EVENT_CLASS( btrfs__file_extent_item_inline, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, int slot, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, int slot, u64 start), TP_ARGS(bi, l, fi, slot, start), @@ -426,8 +426,8 @@ DECLARE_EVENT_CLASS( DEFINE_EVENT( btrfs__file_extent_item_regular, btrfs_get_extent_show_fi_regular, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, u64 start), TP_ARGS(bi, l, fi, start) ); @@ -435,8 +435,8 @@ DEFINE_EVENT( 
DEFINE_EVENT( btrfs__file_extent_item_regular, btrfs_truncate_show_fi_regular, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, u64 start), TP_ARGS(bi, l, fi, start) ); @@ -444,8 +444,8 @@ DEFINE_EVENT( DEFINE_EVENT( btrfs__file_extent_item_inline, btrfs_get_extent_show_fi_inline, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, int slot, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, int slot, u64 start), TP_ARGS(bi, l, fi, slot, start) ); @@ -453,8 +453,8 @@ DEFINE_EVENT( DEFINE_EVENT( btrfs__file_extent_item_inline, btrfs_truncate_show_fi_inline, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, int slot, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, int slot, u64 start), TP_ARGS(bi, l, fi, slot, start) ); @@ -1018,7 +1018,7 @@ TRACE_EVENT(btrfs_cow_block, TRACE_EVENT(btrfs_space_reservation, - TP_PROTO(const struct btrfs_fs_info *fs_info, char *type, u64 val, + TP_PROTO(const struct btrfs_fs_info *fs_info, const char *type, u64 val, u64 bytes, int reserve), TP_ARGS(fs_info, type, val, bytes, reserve), @@ -1051,7 +1051,7 @@ TRACE_EVENT(btrfs_space_reservation, TRACE_EVENT(btrfs_trigger_flush, TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes, - int flush, char *reason), + int flush, const char *reason), TP_ARGS(fs_info, flags, bytes, flush, reason), @@ -1642,7 +1642,7 @@ TRACE_EVENT(btrfs_qgroup_account_extent, TRACE_EVENT(qgroup_update_counters, TP_PROTO(const struct btrfs_fs_info *fs_info, - struct btrfs_qgroup *qgroup, + const struct btrfs_qgroup *qgroup, u64 cur_old_count, u64 cur_new_count), TP_ARGS(fs_info, qgroup, cur_old_count, cur_new_count), @@ -1823,7 +1823,7 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert, ); TRACE_EVENT(btrfs_inode_mod_outstanding_extents, - TP_PROTO(struct btrfs_root *root, u64 ino, int mod), + TP_PROTO(const struct btrfs_root *root, u64 ino, int mod), TP_ARGS(root, ino, mod), @@ -1904,7 +1904,7 @@ TRACE_EVENT(btrfs_set_extent_bit, TP_fast_assign_btrfs(tree->fs_info, __entry->owner = tree->owner; if (tree->private_data) { - struct inode *inode = tree->private_data; + const struct inode *inode = tree->private_data; __entry->ino = btrfs_ino(BTRFS_I(inode)); __entry->rootid = @@ -1943,7 +1943,7 @@ TRACE_EVENT(btrfs_clear_extent_bit, TP_fast_assign_btrfs(tree->fs_info, __entry->owner = tree->owner; if (tree->private_data) { - struct inode *inode = tree->private_data; + const struct inode *inode = tree->private_data; __entry->ino = btrfs_ino(BTRFS_I(inode)); __entry->rootid = @@ -1983,7 +1983,7 @@ TRACE_EVENT(btrfs_convert_extent_bit, TP_fast_assign_btrfs(tree->fs_info, __entry->owner = tree->owner; if (tree->private_data) { - struct inode *inode = tree->private_data; + const struct inode *inode = tree->private_data; __entry->ino = btrfs_ino(BTRFS_I(inode)); __entry->rootid = @@ -2092,8 +2092,8 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic); DECLARE_EVENT_CLASS(btrfs__space_info_update, - TP_PROTO(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *sinfo, u64 old, s64 diff), + TP_PROTO(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo, u64 old, s64 diff), 
TP_ARGS(fs_info, sinfo, old, diff), @@ -2115,16 +2115,16 @@ DECLARE_EVENT_CLASS(btrfs__space_info_update, DEFINE_EVENT(btrfs__space_info_update, update_bytes_may_use, - TP_PROTO(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *sinfo, u64 old, s64 diff), + TP_PROTO(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo, u64 old, s64 diff), TP_ARGS(fs_info, sinfo, old, diff) ); DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned, - TP_PROTO(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *sinfo, u64 old, s64 diff), + TP_PROTO(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo, u64 old, s64 diff), TP_ARGS(fs_info, sinfo, old, diff) ); -- cgit v1.2.3-59-g8ed1b From 9d6cb1b0f9dfefac9cf2f62c8582aa892770b34e Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Fri, 18 Oct 2019 11:58:20 +0200 Subject: btrfs: raid56: reduce indentation in lock_stripe_add In lock_stripe_add() we're traversing the stripe hash list and checking whether the current list element's raid_map is equal to the raid bio's raid_map. If both are equal we continue processing. If we'd check for inequality instead of equality we can reduce one level of indentation. Reviewed-by: Nikolay Borisov Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/raid56.c | 91 +++++++++++++++++++++++++++---------------------------- 1 file changed, 44 insertions(+), 47 deletions(-) diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 8f47a85944eb..d3fc55f8846e 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -682,62 +682,59 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) spin_lock_irqsave(&h->lock, flags); list_for_each_entry(cur, &h->hash_list, hash_list) { - if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { - spin_lock(&cur->bio_list_lock); - - /* can we steal this cached rbio's pages? */ - if (bio_list_empty(&cur->bio_list) && - list_empty(&cur->plug_list) && - test_bit(RBIO_CACHE_BIT, &cur->flags) && - !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { - list_del_init(&cur->hash_list); - refcount_dec(&cur->refs); - - steal_rbio(cur, rbio); - cache_drop = cur; - spin_unlock(&cur->bio_list_lock); + if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0]) + continue; - goto lockit; - } + spin_lock(&cur->bio_list_lock); - /* can we merge into the lock owner? */ - if (rbio_can_merge(cur, rbio)) { - merge_rbio(cur, rbio); - spin_unlock(&cur->bio_list_lock); - freeit = rbio; - ret = 1; - goto out; - } + /* Can we steal this cached rbio's pages? */ + if (bio_list_empty(&cur->bio_list) && + list_empty(&cur->plug_list) && + test_bit(RBIO_CACHE_BIT, &cur->flags) && + !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { + list_del_init(&cur->hash_list); + refcount_dec(&cur->refs); + steal_rbio(cur, rbio); + cache_drop = cur; + spin_unlock(&cur->bio_list_lock); - /* - * we couldn't merge with the running - * rbio, see if we can merge with the - * pending ones.
We don't have to - * check for rmw_locked because there - * is no way they are inside finish_rmw - * right now - */ - list_for_each_entry(pending, &cur->plug_list, - plug_list) { - if (rbio_can_merge(pending, rbio)) { - merge_rbio(pending, rbio); - spin_unlock(&cur->bio_list_lock); - freeit = rbio; - ret = 1; - goto out; - } - } + goto lockit; + } - /* no merging, put us on the tail of the plug list, - * our rbio will be started with the currently - * running rbio unlocks - */ - list_add_tail(&rbio->plug_list, &cur->plug_list); + /* Can we merge into the lock owner? */ + if (rbio_can_merge(cur, rbio)) { + merge_rbio(cur, rbio); spin_unlock(&cur->bio_list_lock); + freeit = rbio; ret = 1; goto out; } + + + /* + * We couldn't merge with the running rbio, see if we can merge + * with the pending ones. We don't have to check for rmw_locked + * because there is no way they are inside finish_rmw right now + */ + list_for_each_entry(pending, &cur->plug_list, plug_list) { + if (rbio_can_merge(pending, rbio)) { + merge_rbio(pending, rbio); + spin_unlock(&cur->bio_list_lock); + freeit = rbio; + ret = 1; + goto out; + } + } + + /* + * No merging, put us on the tail of the plug list, our rbio + * will be started with the currently running rbio unlocks + */ + list_add_tail(&rbio->plug_list, &cur->plug_list); + spin_unlock(&cur->bio_list_lock); + ret = 1; + goto out; } lockit: refcount_inc(&rbio->refs); -- cgit v1.2.3-59-g8ed1b From 721860d5780c64d0757a205413c7eecddfd89039 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Fri, 18 Oct 2019 11:58:21 +0200 Subject: btrfs: remove pointless local variable in lock_stripe_add() In lock_stripe_add() we're caching the bucket for the stripe hash table just for a single call to dereference the stripe hash. If we just directly call rbio_bucket() we can drop the pointless local variable. Also move the dereferencing of the stripe hash outside of the variable declaration block to not break over the 80 characters limit. Reviewed-by: Nikolay Borisov Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/raid56.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index d3fc55f8846e..a8e53c8e7b01 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -671,8 +671,7 @@ static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) */ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) { - int bucket = rbio_bucket(rbio); - struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; + struct btrfs_stripe_hash *h; struct btrfs_raid_bio *cur; struct btrfs_raid_bio *pending; unsigned long flags; @@ -680,6 +679,8 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) struct btrfs_raid_bio *cache_drop = NULL; int ret = 0; + h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio); + spin_lock_irqsave(&h->lock, flags); list_for_each_entry(cur, &h->hash_list, hash_list) { if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0]) -- cgit v1.2.3-59-g8ed1b From 5ae216929069507efce3caff6f5618ec919cad11 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Fri, 18 Oct 2019 11:58:22 +0200 Subject: btrfs: reduce indentation in btrfs_may_alloc_data_chunk In btrfs_may_alloc_data_chunk() we're checking if the chunk is of type BTRFS_BLOCK_GROUP_DATA and if it is we process it. Instead of checking if the chunk type is a BTRFS_BLOCK_GROUP_DATA chunk we can negate the check and bail out early if it isn't.
This makes the code a bit more readable. Reviewed-by: Nikolay Borisov Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f03ad5375b15..d040b479e053 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2982,27 +2982,28 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, chunk_type = cache->flags; btrfs_put_block_group(cache); - if (chunk_type & BTRFS_BLOCK_GROUP_DATA) { - spin_lock(&fs_info->data_sinfo->lock); - bytes_used = fs_info->data_sinfo->bytes_used; - spin_unlock(&fs_info->data_sinfo->lock); - - if (!bytes_used) { - struct btrfs_trans_handle *trans; - int ret; - - trans = btrfs_join_transaction(fs_info->tree_root); - if (IS_ERR(trans)) - return PTR_ERR(trans); - - ret = btrfs_force_chunk_alloc(trans, - BTRFS_BLOCK_GROUP_DATA); - btrfs_end_transaction(trans); - if (ret < 0) - return ret; - return 1; - } + if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) + return 0; + + spin_lock(&fs_info->data_sinfo->lock); + bytes_used = fs_info->data_sinfo->bytes_used; + spin_unlock(&fs_info->data_sinfo->lock); + + if (!bytes_used) { + struct btrfs_trans_handle *trans; + int ret; + + trans = btrfs_join_transaction(fs_info->tree_root); + if (IS_ERR(trans)) + return PTR_ERR(trans); + + ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); + btrfs_end_transaction(trans); + if (ret < 0) + return ret; + return 1; } + return 0; } -- cgit v1.2.3-59-g8ed1b From 32ab3d1b4d0c55daea9ba52c0f5d1363be21bf2a Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Fri, 18 Oct 2019 11:58:23 +0200 Subject: btrfs: remove pointless indentation in btrfs_read_sys_array() Instead of checking if we've read a BTRFS_CHUNK_ITEM_KEY from disk and then process it we could just bail out early if the read disk key wasn't a BTRFS_CHUNK_ITEM_KEY. This removes a level of indentation and makes the code nicer to read. 
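The same guard-clause shape applies to both of these cleanups; a minimal, self-contained sketch of the pattern (hypothetical names, not btrfs code):

#include <errno.h>

struct item { int type; int value; };
enum { ITEM_TYPE_WANTED = 1 };

static int process_item(const struct item *it)
{
	/* Bail out early on the unexpected type instead of nesting the work. */
	if (it->type != ITEM_TYPE_WANTED)
		return -EIO;

	/* The common path now stays at a single indentation level. */
	return it->value;
}
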
Reviewed-by: Nikolay Borisov Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 71 +++++++++++++++++++++++++++--------------------------- 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d040b479e053..735627537f45 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6806,48 +6806,49 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) sb_array_offset += len; cur_offset += len; - if (key.type == BTRFS_CHUNK_ITEM_KEY) { - chunk = (struct btrfs_chunk *)sb_array_offset; - /* - * At least one btrfs_chunk with one stripe must be - * present, exact stripe count check comes afterwards - */ - len = btrfs_chunk_item_size(1); - if (cur_offset + len > array_size) - goto out_short_read; - - num_stripes = btrfs_chunk_num_stripes(sb, chunk); - if (!num_stripes) { - btrfs_err(fs_info, - "invalid number of stripes %u in sys_array at offset %u", - num_stripes, cur_offset); - ret = -EIO; - break; - } + if (key.type != BTRFS_CHUNK_ITEM_KEY) { + btrfs_err(fs_info, + "unexpected item type %u in sys_array at offset %u", + (u32)key.type, cur_offset); + ret = -EIO; + break; + } - type = btrfs_chunk_type(sb, chunk); - if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { - btrfs_err(fs_info, - "invalid chunk type %llu in sys_array at offset %u", - type, cur_offset); - ret = -EIO; - break; - } + chunk = (struct btrfs_chunk *)sb_array_offset; + /* + * At least one btrfs_chunk with one stripe must be present, + * exact stripe count check comes afterwards + */ + len = btrfs_chunk_item_size(1); + if (cur_offset + len > array_size) + goto out_short_read; - len = btrfs_chunk_item_size(num_stripes); - if (cur_offset + len > array_size) - goto out_short_read; + num_stripes = btrfs_chunk_num_stripes(sb, chunk); + if (!num_stripes) { + btrfs_err(fs_info, + "invalid number of stripes %u in sys_array at offset %u", + num_stripes, cur_offset); + ret = -EIO; + break; + } - ret = read_one_chunk(&key, sb, chunk); - if (ret) - break; - } else { + type = btrfs_chunk_type(sb, chunk); + if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { btrfs_err(fs_info, - "unexpected item type %u in sys_array at offset %u", - (u32)key.type, cur_offset); + "invalid chunk type %llu in sys_array at offset %u", + type, cur_offset); ret = -EIO; break; } + + len = btrfs_chunk_item_size(num_stripes); + if (cur_offset + len > array_size) + goto out_short_read; + + ret = read_one_chunk(&key, sb, chunk); + if (ret) + break; + array_ptr += len; sb_array_offset += len; cur_offset += len; -- cgit v1.2.3-59-g8ed1b From c3e14909d3b399d49d8dfcf3edbcd5463a7eed6f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 10 May 2019 17:48:30 +0200 Subject: btrfs: assert extent_map bdevs and lookup_map and split This is a preparatory patch for removing extent_map::bdev. There's some history behind the code so this is only precaution to catch if things break before the actual removal happens. Logically, comparing a raw low-level block device (bdev) does not make sense for extent maps (high-level objects). This had no effect in practice but was quite confusing in the code. The lookup_map is set iff EXTENT_FLAG_FS_MAPPING is set. The two pointers were stored in the same bytes and used potentially in two meanings. Now they're split, so the asserts are in place to check that the condition will not change. 
The lookup map pointer misused bdev, this has been changed in commit 95617d69326c ("btrfs: cleanup, stop casting for extent_map->lookup everywhere") to the explicit type. But the semantics hasn't changed and bdev was not actually used to decide if maps are mergeable. Signed-off-by: David Sterba --- fs/btrfs/extent_map.c | 9 ++++++++- fs/btrfs/extent_map.h | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 9d30acca55e1..9f99dccbc3ca 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -214,9 +214,16 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next) ASSERT(next->block_start != EXTENT_MAP_DELALLOC && prev->block_start != EXTENT_MAP_DELALLOC); + if (prev->map_lookup || next->map_lookup) + ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) && + test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags)); + + if (prev->bdev || next->bdev) + ASSERT(prev->bdev == next->bdev); + if (extent_map_end(prev) == next->start && prev->flags == next->flags && - prev->bdev == next->bdev && + prev->map_lookup == next->map_lookup && ((next->block_start == EXTENT_MAP_HOLE && prev->block_start == EXTENT_MAP_HOLE) || (next->block_start == EXTENT_MAP_INLINE && diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 473f039fcd7c..3eb9c596b445 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -42,7 +42,7 @@ struct extent_map { u64 block_len; u64 generation; unsigned long flags; - union { + struct { struct block_device *bdev; /* -- cgit v1.2.3-59-g8ed1b From 8530c37a70df71ea8a04fbafabba0b92f4475c6c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 30 Aug 2019 15:42:07 +0200 Subject: btrfs: get bdev from latest_dev for dio bh_result To remove use of extent_map::bdev we need to find a replacement, and the latest_bdev is the only one we can use here, because inode::i_bdev and superblock::s_bdev are NULL. The DIO code uses bdev in two places: * to read blocksize to perform alignment checks in do_blockdev_direct_IO, but we do them in btrfs code before any call to DIO * in the following call chain: do_direct_IO get_more_blocks sdio->get_block() <-- this is btrfs_get_blocks_direct subsequently the map_bh->b_dev member is used in clean_bdev_aliases and dio_new_bio to set the bio's bdev to that of the buffer_head. However, because we have provided a submit function dio_bio_submit calls our submission function and ignores the bdev. So it's safe to pass any valid bdev that's used within the filesystem. 
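To illustrate why any valid bdev is good enough, a condensed sketch of a get_block style callback follows (not the real btrfs_get_blocks_direct(), error handling and extent lookup omitted). The generic direct I/O code copies b_bdev into the buffer_head/bio plumbing, but because btrfs supplies its own submission hook, the value never decides which device the bio is finally sent to:

static int sketch_get_block(struct inode *inode, sector_t iblock,
			    struct buffer_head *bh_result, int create)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	/* Describe the mapping; any valid bdev of this filesystem will do. */
	bh_result->b_blocknr = iblock;
	bh_result->b_size = i_blocksize(inode);
	bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
	set_buffer_mapped(bh_result);
	return 0;
}
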
Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/inode.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b943c9770533..5edc6a27ea4a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7662,6 +7662,8 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em, struct inode *inode, u64 start, u64 len) { + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + if (em->block_start == EXTENT_MAP_HOLE || test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) return -ENOENT; @@ -7671,7 +7673,7 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em, bh_result->b_blocknr = (em->block_start + (start - em->start)) >> inode->i_blkbits; bh_result->b_size = len; - bh_result->b_bdev = em->bdev; + bh_result->b_bdev = fs_info->fs_devices->latest_bdev; set_buffer_mapped(bh_result); return 0; @@ -7754,7 +7756,7 @@ skip_cow: bh_result->b_blocknr = (em->block_start + (start - em->start)) >> inode->i_blkbits; bh_result->b_size = len; - bh_result->b_bdev = em->bdev; + bh_result->b_bdev = fs_info->fs_devices->latest_bdev; set_buffer_mapped(bh_result); if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) -- cgit v1.2.3-59-g8ed1b From 3951e7f050ac6a38bbc859fc3cd6093890c31d1c Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Mon, 7 Oct 2019 11:11:01 +0200 Subject: btrfs: add xxhash64 to checksumming algorithms Add xxhash64 to the list of possible checksumming algorithms used by BTRFS. Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/Kconfig | 1 + fs/btrfs/ctree.c | 1 + fs/btrfs/disk-io.c | 1 + fs/btrfs/super.c | 1 + include/uapi/linux/btrfs_tree.h | 1 + 5 files changed, 5 insertions(+) diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 38651fae7f21..6d5a01c57da3 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -5,6 +5,7 @@ config BTRFS_FS select CRYPTO select CRYPTO_CRC32C select LIBCRC32C + select CRYPTO_XXHASH select ZLIB_INFLATE select ZLIB_DEFLATE select LZO_COMPRESS diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 739757978686..7463c121791d 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -34,6 +34,7 @@ static const struct btrfs_csums { const char *name; } btrfs_csums[] = { [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" }, + [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" }, }; int btrfs_super_csum_size(const struct btrfs_super_block *s) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5d32deb42993..82a2fa755b13 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -352,6 +352,7 @@ static bool btrfs_supported_super_csum(u16 csum_type) { switch (csum_type) { case BTRFS_CSUM_TYPE_CRC32: + case BTRFS_CSUM_TYPE_XXHASH: return true; default: return false; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index d5d15a19f51d..c91cdf123cd8 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2462,3 +2462,4 @@ module_exit(exit_btrfs_fs) MODULE_LICENSE("GPL"); MODULE_SOFTDEP("pre: crc32c"); +MODULE_SOFTDEP("pre: xxhash64"); diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 44136de2f5d7..962312939a8f 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -302,6 +302,7 @@ /* csum types */ enum btrfs_csum_type { BTRFS_CSUM_TYPE_CRC32 = 0, + BTRFS_CSUM_TYPE_XXHASH = 1, }; /* -- cgit v1.2.3-59-g8ed1b From 3831bf0094abed51e71cbeca8b6edf8b88c2644b Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Mon, 7 Oct 2019 11:11:02 +0200 Subject: btrfs: add 
sha256 to checksumming algorithm Add sha256 to the list of possible checksumming algorithms used by BTRFS. Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/Kconfig | 1 + fs/btrfs/ctree.c | 1 + fs/btrfs/disk-io.c | 1 + fs/btrfs/super.c | 1 + include/uapi/linux/btrfs_tree.h | 1 + 5 files changed, 5 insertions(+) diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 6d5a01c57da3..75b6d10c9845 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -6,6 +6,7 @@ config BTRFS_FS select CRYPTO_CRC32C select LIBCRC32C select CRYPTO_XXHASH + select CRYPTO_SHA256 select ZLIB_INFLATE select ZLIB_DEFLATE select LZO_COMPRESS diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 7463c121791d..bd6ea433ed65 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -35,6 +35,7 @@ static const struct btrfs_csums { } btrfs_csums[] = { [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" }, [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" }, + [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" }, }; int btrfs_super_csum_size(const struct btrfs_super_block *s) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 82a2fa755b13..21fab336f8ca 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -353,6 +353,7 @@ static bool btrfs_supported_super_csum(u16 csum_type) switch (csum_type) { case BTRFS_CSUM_TYPE_CRC32: case BTRFS_CSUM_TYPE_XXHASH: + case BTRFS_CSUM_TYPE_SHA256: return true; default: return false; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index c91cdf123cd8..36440336c08b 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2463,3 +2463,4 @@ MODULE_LICENSE("GPL"); MODULE_SOFTDEP("pre: crc32c"); MODULE_SOFTDEP("pre: xxhash64"); +MODULE_SOFTDEP("pre: sha256"); diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 962312939a8f..e2823c9b2061 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -303,6 +303,7 @@ enum btrfs_csum_type { BTRFS_CSUM_TYPE_CRC32 = 0, BTRFS_CSUM_TYPE_XXHASH = 1, + BTRFS_CSUM_TYPE_SHA256 = 2, }; /* -- cgit v1.2.3-59-g8ed1b From f7cea56c0fff95bd5a6cd21b9fa299f66193b604 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 7 Oct 2019 11:11:03 +0200 Subject: btrfs: sysfs: export supported checksums Export supported checksum algorithms via sysfs in the list of static features: /sys/fs/btrfs/features/supported_checksums Space separated list of checksum algorithm names.
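For illustration, once all the algorithms added later in this series are in place, reading /sys/fs/btrfs/features/supported_checksums would return a single line such as:

crc32c xxhash64 sha256 blake2b

(the exact list depends on which checksum types the running btrfs module knows about).
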
Co-developed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 5 +++++ fs/btrfs/ctree.h | 2 ++ fs/btrfs/sysfs.c | 22 ++++++++++++++++++++++ 3 files changed, 29 insertions(+) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index bd6ea433ed65..c1561097e5a3 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -53,6 +53,11 @@ const char *btrfs_super_csum_name(u16 csum_type) return btrfs_csums[csum_type].name; } +size_t __const btrfs_get_num_csums(void) +{ + return ARRAY_SIZE(btrfs_csums); +} + struct btrfs_path *btrfs_alloc_path(void) { return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d5c9fbe32c0c..b17944995e09 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2163,6 +2163,8 @@ BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, int btrfs_super_csum_size(const struct btrfs_super_block *s); const char *btrfs_super_csum_name(u16 csum_type); +size_t __const btrfs_get_num_csums(void); + /* * The leaf data grows from end-to-front in the node. diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index f6d3c80f2e28..41eb6a313bf8 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -295,8 +295,30 @@ static ssize_t rmdir_subvol_show(struct kobject *kobj, } BTRFS_ATTR(static_feature, rmdir_subvol, rmdir_subvol_show); +static ssize_t supported_checksums_show(struct kobject *kobj, + struct kobj_attribute *a, char *buf) +{ + ssize_t ret = 0; + int i; + + for (i = 0; i < btrfs_get_num_csums(); i++) { + /* + * This "trick" only works as long as 'enum btrfs_csum_type' has + * no holes in it + */ + ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s%s", + (i == 0 ? "" : " "), btrfs_super_csum_name(i)); + + } + + ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); + return ret; +} +BTRFS_ATTR(static_feature, supported_checksums, supported_checksums_show); + static struct attribute *btrfs_supported_static_feature_attrs[] = { BTRFS_ATTR_PTR(static_feature, rmdir_subvol), + BTRFS_ATTR_PTR(static_feature, supported_checksums), NULL }; -- cgit v1.2.3-59-g8ed1b From 41e6d2a808ad89d34c662ba8dd1209c4fefb1416 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Mon, 7 Oct 2019 11:11:04 +0200 Subject: btrfs: sysfs: show used checksum driver per filesystem Show the used driver for the checksum algorithm for the filesystem in sysfs file /sys/fs/btrfs/UUID/features/checksum, eg. 
crc32c (crc32c-generic) Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 41eb6a313bf8..98979dd048a8 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" @@ -626,6 +627,19 @@ static ssize_t btrfs_metadata_uuid_show(struct kobject *kobj, BTRFS_ATTR(, metadata_uuid, btrfs_metadata_uuid_show); +static ssize_t btrfs_checksum_show(struct kobject *kobj, + struct kobj_attribute *a, char *buf) +{ + struct btrfs_fs_info *fs_info = to_fs_info(kobj); + u16 csum_type = btrfs_super_csum_type(fs_info->super_copy); + + return snprintf(buf, PAGE_SIZE, "%s (%s)\n", + btrfs_super_csum_name(csum_type), + crypto_shash_driver_name(fs_info->csum_shash)); +} + +BTRFS_ATTR(, checksum, btrfs_checksum_show); + static const struct attribute *btrfs_attrs[] = { BTRFS_ATTR_PTR(, label), BTRFS_ATTR_PTR(, nodesize), @@ -633,6 +647,7 @@ static const struct attribute *btrfs_attrs[] = { BTRFS_ATTR_PTR(, clone_alignment), BTRFS_ATTR_PTR(, quota_override), BTRFS_ATTR_PTR(, metadata_uuid), + BTRFS_ATTR_PTR(, checksum), NULL, }; -- cgit v1.2.3-59-g8ed1b From b4e967be431cf37f56cd1993592943007d7ab03b Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 8 Oct 2019 18:41:33 +0200 Subject: btrfs: add member for a specific checksum driver Currently all the checksum algorithms generate a digest of fixed size and we use it. The on-disk format can hold up to BTRFS_CSUM_SIZE bytes and BLAKE2b produces a digest of 512 bits by default. We can't use that and will use blake2b-256, this needs to be passed to the crypto API. Separate that from the base algorithm name and add a member to request a specific driver, in this case with the digest size. The only place that uses the driver name is the crypto API setup.
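A minimal sketch of how the name/driver split is consumed (illustrative only; btrfs_init_csum_hash() in the diff below is the real consumer). The driver string, e.g. "blake2b-256", is what the crypto API sees, while the plain name stays user visible:

static struct crypto_shash *sketch_alloc_csum_shash(u16 csum_type)
{
	const char *driver = btrfs_super_csum_driver(csum_type);
	struct crypto_shash *tfm = crypto_alloc_shash(driver, 0, 0);

	if (IS_ERR(tfm))
		return tfm;
	/* e.g. "blake2b-256" yields a 32 byte digest, fitting BTRFS_CSUM_SIZE */
	ASSERT(crypto_shash_digestsize(tfm) <= BTRFS_CSUM_SIZE);
	return tfm;
}
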
Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 12 ++++++++++++ fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 6 +++--- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index c1561097e5a3..c211216b4524 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -32,6 +32,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, static const struct btrfs_csums { u16 size; const char *name; + const char *driver; } btrfs_csums[] = { [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" }, [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" }, @@ -53,6 +54,17 @@ const char *btrfs_super_csum_name(u16 csum_type) return btrfs_csums[csum_type].name; } +/* + * Return driver name if defined, otherwise the name that's also a valid driver + * name + */ +const char *btrfs_super_csum_driver(u16 csum_type) +{ + /* csum type is validated at mount time */ + return btrfs_csums[csum_type].driver ?: + btrfs_csums[csum_type].name; +} + size_t __const btrfs_get_num_csums(void) { return ARRAY_SIZE(btrfs_csums); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b17944995e09..4a842c0fa062 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2163,6 +2163,7 @@ BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, int btrfs_super_csum_size(const struct btrfs_super_block *s); const char *btrfs_super_csum_name(u16 csum_type); +const char *btrfs_super_csum_driver(u16 csum_type); size_t __const btrfs_get_num_csums(void); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 21fab336f8ca..948ed5ac0794 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2219,13 +2219,13 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type) { struct crypto_shash *csum_shash; - const char *csum_name = btrfs_super_csum_name(csum_type); + const char *csum_driver = btrfs_super_csum_driver(csum_type); - csum_shash = crypto_alloc_shash(csum_name, 0, 0); + csum_shash = crypto_alloc_shash(csum_driver, 0, 0); if (IS_ERR(csum_shash)) { btrfs_err(fs_info, "error allocating %s hash for checksum", - csum_name); + csum_driver); return PTR_ERR(csum_shash); } -- cgit v1.2.3-59-g8ed1b From 352ae07b599a03b08a2149d9c2ff9c3479591af2 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 7 Oct 2019 11:11:02 +0200 Subject: btrfs: add blake2b to checksumming algorithms Add blake2b (with 256 bit digest) to the list of possible checksumming algorithms used by BTRFS. 
Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 2 ++ fs/btrfs/disk-io.c | 1 + fs/btrfs/super.c | 1 + include/uapi/linux/btrfs_tree.h | 1 + 4 files changed, 5 insertions(+) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index c211216b4524..5b6e86aaf2e1 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -37,6 +37,8 @@ static const struct btrfs_csums { [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" }, [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" }, [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" }, + [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b", + .driver = "blake2b-256" }, }; int btrfs_super_csum_size(const struct btrfs_super_block *s) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 948ed5ac0794..7c345c4bc817 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -354,6 +354,7 @@ static bool btrfs_supported_super_csum(u16 csum_type) case BTRFS_CSUM_TYPE_CRC32: case BTRFS_CSUM_TYPE_XXHASH: case BTRFS_CSUM_TYPE_SHA256: + case BTRFS_CSUM_TYPE_BLAKE2: return true; default: return false; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 36440336c08b..792bb0bc9a0b 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2464,3 +2464,4 @@ MODULE_LICENSE("GPL"); MODULE_SOFTDEP("pre: crc32c"); MODULE_SOFTDEP("pre: xxhash64"); MODULE_SOFTDEP("pre: sha256"); +MODULE_SOFTDEP("pre: blake2b-256"); diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index e2823c9b2061..5160be1d7332 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -304,6 +304,7 @@ enum btrfs_csum_type { BTRFS_CSUM_TYPE_CRC32 = 0, BTRFS_CSUM_TYPE_XXHASH = 1, BTRFS_CSUM_TYPE_SHA256 = 2, + BTRFS_CSUM_TYPE_BLAKE2 = 3, }; /* -- cgit v1.2.3-59-g8ed1b From 34b127aecd4fe8e6a3903e10f204a7b7ffddca22 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 24 Oct 2019 09:38:29 +0800 Subject: btrfs: Remove btrfs_bio::flags member The last user of btrfs_bio::flags was removed in commit 326e1dbb5736 ("block: remove management of bi_remaining when restoring original bi_end_io"), remove it. (Tagged for stable as the structure is heavily used and space savings are desirable.) CC: stable@vger.kernel.org # 4.4+ Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.h | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 9b5fba4d6f6b..ac4ba8c57283 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -319,7 +319,6 @@ struct btrfs_bio { u64 map_type; /* get from map_lookup->type */ bio_end_io_t *end_io; struct bio *orig_bio; - unsigned long flags; void *private; atomic_t error; int max_errors; -- cgit v1.2.3-59-g8ed1b From bf38be65f3703d5ef3661c0a2802bc28e76b8f19 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 23 Oct 2019 18:48:11 +0200 Subject: btrfs: move block_group_item::used to block group For unknown reasons, the member 'used' in the block group struct is stored in the b-tree item and accessed everywhere using the special accessor helper. Let's unify it and make it a regular member and only update the item before writing it to the tree. The item is still being used for flags and chunk_objectid, there's some duplication until the item is removed in following patches. 
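Condensed from the diff below, the shape of the conversion on the read side and at write-out time (not new code, just the pattern):

	/* before: every use went through the item embedded in the cache */
	bytes_used = cache->key.offset - btrfs_block_group_used(&cache->item);

	/* after: plain member access ... */
	bytes_used = cache->key.offset - cache->used;

	/* ... and the item is only filled in right before it is written out */
	btrfs_set_block_group_used(&bgi, cache->used);
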
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Reviewed-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 50 ++++++++++++++++++++++++++------------------ fs/btrfs/block-group.h | 1 + fs/btrfs/extent-tree.c | 3 +-- fs/btrfs/free-space-cache.c | 2 +- fs/btrfs/ioctl.c | 3 +-- fs/btrfs/relocation.c | 2 +- fs/btrfs/scrub.c | 2 +- fs/btrfs/space-info.c | 2 +- fs/btrfs/sysfs.c | 2 +- fs/btrfs/volumes.c | 4 ++-- include/trace/events/btrfs.h | 5 ++--- 11 files changed, 42 insertions(+), 34 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 540a7a63601e..4da5e0f6cb82 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -656,8 +656,7 @@ static noinline void caching_thread(struct btrfs_work *work) spin_lock(&block_group->space_info->lock); spin_lock(&block_group->lock); - bytes_used = block_group->key.offset - - btrfs_block_group_used(&block_group->item); + bytes_used = block_group->key.offset - block_group->used; block_group->space_info->bytes_used += bytes_used >> 1; spin_unlock(&block_group->lock); spin_unlock(&block_group->space_info->lock); @@ -762,8 +761,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); - bytes_used = cache->key.offset - - btrfs_block_group_used(&cache->item); + bytes_used = cache->key.offset - cache->used; cache->space_info->bytes_used += bytes_used >> 1; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); @@ -1209,7 +1207,7 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) } num_bytes = cache->key.offset - cache->reserved - cache->pinned - - cache->bytes_super - btrfs_block_group_used(&cache->item); + cache->bytes_super - cache->used; sinfo_used = btrfs_space_info_used(sinfo, true); /* @@ -1278,8 +1276,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) down_write(&space_info->groups_sem); spin_lock(&block_group->lock); if (block_group->reserved || block_group->pinned || - btrfs_block_group_used(&block_group->item) || - block_group->ro || + block_group->used || block_group->ro || list_is_singular(&block_group->list)) { /* * We want to bail if we made new allocations or have @@ -1719,6 +1716,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) need_clear = 1; while (1) { + struct btrfs_block_group_item bgi; + ret = find_first_block_group(info, path, &key); if (ret > 0) break; @@ -1750,9 +1749,12 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) cache->disk_cache_state = BTRFS_DC_CLEAR; } - read_extent_buffer(leaf, &cache->item, + read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, path->slots[0]), - sizeof(cache->item)); + sizeof(bgi)); + /* Duplicate as the item is still partially used */ + memcpy(&cache->item, &bgi, sizeof(bgi)); + cache->used = btrfs_block_group_used(&bgi); cache->flags = btrfs_block_group_flags(&cache->item); if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && @@ -1791,11 +1793,11 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) * the space in and be done with it. This saves us _a_lot_ of * time, particularly in the full case. 
*/ - if (found_key.offset == btrfs_block_group_used(&cache->item)) { + if (found_key.offset == cache->used) { cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; btrfs_free_excluded_extents(cache); - } else if (btrfs_block_group_used(&cache->item) == 0) { + } else if (cache->used == 0) { cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; add_new_free_space(cache, found_key.objectid, @@ -1813,7 +1815,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) trace_btrfs_add_block_group(info, cache, 0); btrfs_update_space_info(info, cache->flags, found_key.offset, - btrfs_block_group_used(&cache->item), + cache->used, cache->bytes_super, &space_info); cache->space_info = space_info; @@ -1823,7 +1825,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) set_avail_alloc_bits(info, cache->flags); if (btrfs_chunk_readonly(info, cache->key.objectid)) { inc_block_group_ro(cache, 1); - } else if (btrfs_block_group_used(&cache->item) == 0) { + } else if (cache->used == 0) { ASSERT(list_empty(&cache->bg_list)); btrfs_mark_bg_unused(cache); } @@ -1877,7 +1879,12 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) goto next; spin_lock(&block_group->lock); + /* + * Copy partially filled item from the cache and ovewrite used + * that has the correct value + */ memcpy(&item, &block_group->item, sizeof(item)); + btrfs_set_block_group_used(&item, block_group->used); memcpy(&key, &block_group->key, sizeof(key)); spin_unlock(&block_group->lock); @@ -1910,7 +1917,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, if (!cache) return -ENOMEM; - btrfs_set_block_group_used(&cache->item, bytes_used); + cache->used = bytes_used; btrfs_set_block_group_chunk_objectid(&cache->item, BTRFS_FIRST_CHUNK_TREE_OBJECTID); btrfs_set_block_group_flags(&cache->item, type); @@ -2102,8 +2109,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) spin_lock(&cache->lock); if (!--cache->ro) { num_bytes = cache->key.offset - cache->reserved - - cache->pinned - cache->bytes_super - - btrfs_block_group_used(&cache->item); + cache->pinned - cache->bytes_super - cache->used; sinfo->bytes_readonly -= num_bytes; list_del_init(&cache->ro_list); } @@ -2120,6 +2126,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root = fs_info->extent_root; unsigned long bi; struct extent_buffer *leaf; + struct btrfs_block_group_item bgi; ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); if (ret) { @@ -2130,7 +2137,10 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, leaf = path->nodes[0]; bi = btrfs_item_ptr_offset(leaf, path->slots[0]); - write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); + /* Partial copy of item, update the rest from memory */ + memcpy(&bgi, &cache->item, sizeof(bgi)); + btrfs_set_block_group_used(&bgi, cache->used); + write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); btrfs_mark_buffer_dirty(leaf); fail: btrfs_release_path(path); @@ -2674,11 +2684,11 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, cache->disk_cache_state < BTRFS_DC_CLEAR) cache->disk_cache_state = BTRFS_DC_CLEAR; - old_val = btrfs_block_group_used(&cache->item); + old_val = cache->used; num_bytes = min(total, cache->key.offset - byte_in_group); if (alloc) { old_val += num_bytes; - btrfs_set_block_group_used(&cache->item, old_val); + cache->used = old_val; cache->reserved -= num_bytes; cache->space_info->bytes_reserved -= 
num_bytes; cache->space_info->bytes_used += num_bytes; @@ -2687,7 +2697,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, spin_unlock(&cache->space_info->lock); } else { old_val -= num_bytes; - btrfs_set_block_group_used(&cache->item, old_val); + cache->used = old_val; cache->pinned += num_bytes; btrfs_space_info_update_bytes_pinned(info, cache->space_info, num_bytes); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index c391800388dd..8fa4a70228ee 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -50,6 +50,7 @@ struct btrfs_block_group_cache { spinlock_t lock; u64 pinned; u64 reserved; + u64 used; u64 delalloc_bytes; u64 bytes_super; u64 flags; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 569fd2adecaa..e3fd4b0ca905 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5498,8 +5498,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) factor = btrfs_bg_type_to_factor(block_group->flags); free_bytes += (block_group->key.offset - - btrfs_block_group_used(&block_group->item)) * - factor; + block_group->used) * factor; spin_unlock(&block_group->lock); } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 2a831eb8a66c..e4ea277d4e01 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -828,7 +828,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group) struct btrfs_path *path; int ret = 0; bool matched; - u64 used = btrfs_block_group_used(&block_group->item); + u64 used = block_group->used; /* * If this block group has been marked to be cleared for one reason or diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a51f8fbadf41..4f92241ea28e 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -4039,8 +4039,7 @@ static void get_block_group_info(struct list_head *groups_list, list_for_each_entry(block_group, groups_list, list) { space->flags = block_group->flags; space->total_bytes += block_group->key.offset; - space->used_bytes += - btrfs_block_group_used(&block_group->item); + space->used_bytes += block_group->used; } } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 90b80da5abe6..23e9232e1a81 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4405,7 +4405,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) WARN_ON(rc->block_group->pinned > 0); WARN_ON(rc->block_group->reserved > 0); - WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0); + WARN_ON(rc->block_group->used > 0); out: if (err && rw) btrfs_dec_block_group_ro(rc->block_group); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index a7b043fd7a57..00313a182036 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3678,7 +3678,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, */ spin_lock(&cache->lock); if (!cache->removed && !cache->ro && cache->reserved == 0 && - btrfs_block_group_used(&cache->item) == 0) { + cache->used == 0) { spin_unlock(&cache->lock); btrfs_mark_bg_unused(cache); } else { diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 7539696b4cba..749b0be58e2c 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -302,7 +302,7 @@ again: btrfs_info(fs_info, "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s", cache->key.objectid, cache->key.offset, - btrfs_block_group_used(&cache->item), cache->pinned, + cache->used, cache->pinned, cache->reserved, cache->ro ? 
"[readonly]" : ""); btrfs_dump_free_space(cache, bytes); spin_unlock(&cache->lock); diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 98979dd048a8..8e2763380e56 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -404,7 +404,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj, if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes)) val += block_group->key.offset; else - val += btrfs_block_group_used(&block_group->item); + val += block_group->used; } up_read(&sinfo->groups_sem); return snprintf(buf, PAGE_SIZE, "%llu\n", val); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 735627537f45..2a0637342f08 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3189,7 +3189,7 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off int ret = 1; cache = btrfs_lookup_block_group(fs_info, chunk_offset); - chunk_used = btrfs_block_group_used(&cache->item); + chunk_used = cache->used; if (bargs->usage_min == 0) user_thresh_min = 0; @@ -3220,7 +3220,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, int ret = 1; cache = btrfs_lookup_block_group(fs_info, chunk_offset); - chunk_used = btrfs_block_group_used(&cache->item); + chunk_used = cache->used; if (bargs->usage_min == 0) user_thresh = 1; diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 522d1f2b13e3..7b842b049ea3 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -716,8 +716,7 @@ TRACE_EVENT(btrfs_add_block_group, __entry->offset = block_group->key.objectid; __entry->size = block_group->key.offset; __entry->flags = block_group->flags; - __entry->bytes_used = - btrfs_block_group_used(&block_group->item); + __entry->bytes_used = block_group->used; __entry->bytes_super = block_group->bytes_super; __entry->create = create; ), @@ -1859,7 +1858,7 @@ DECLARE_EVENT_CLASS(btrfs__block_group, TP_fast_assign_btrfs(bg_cache->fs_info, __entry->bytenr = bg_cache->key.objectid, __entry->len = bg_cache->key.offset, - __entry->used = btrfs_block_group_used(&bg_cache->item); + __entry->used = bg_cache->used; __entry->flags = bg_cache->flags; ), -- cgit v1.2.3-59-g8ed1b From f93c63e54730428f474267600fa3a901881d2829 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 23 Oct 2019 18:48:13 +0200 Subject: btrfs: move block_group_item::flags to block group The flags are read from the item that's embedded to block group struct, but the item will be removed. Use the ::flags after read and before write. 
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Reviewed-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 4da5e0f6cb82..52e2a05c8345 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1755,7 +1755,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) /* Duplicate as the item is still partially used */ memcpy(&cache->item, &bgi, sizeof(bgi)); cache->used = btrfs_block_group_used(&bgi); - cache->flags = btrfs_block_group_flags(&cache->item); + cache->flags = btrfs_block_group_flags(&bgi); if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { @@ -1885,6 +1885,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) */ memcpy(&item, &block_group->item, sizeof(item)); btrfs_set_block_group_used(&item, block_group->used); + btrfs_set_block_group_flags(&item, block_group->flags); memcpy(&key, &block_group->key, sizeof(key)); spin_unlock(&block_group->lock); @@ -1920,8 +1921,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, cache->used = bytes_used; btrfs_set_block_group_chunk_objectid(&cache->item, BTRFS_FIRST_CHUNK_TREE_OBJECTID); - btrfs_set_block_group_flags(&cache->item, type); - cache->flags = type; cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; @@ -2140,6 +2139,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, /* Partial copy of item, update the rest from memory */ memcpy(&bgi, &cache->item, sizeof(bgi)); btrfs_set_block_group_used(&bgi, cache->used); + btrfs_set_block_group_flags(&bgi, cache->flags); write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); btrfs_mark_buffer_dirty(leaf); fail: -- cgit v1.2.3-59-g8ed1b From 3d976388daa9ed77c9d2ded914ad07385b37d97e Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 23 Oct 2019 18:48:15 +0200 Subject: btrfs: remove embedded block_group_cache::item The members ::used and ::flags are now in the block group cache structure, the last one is chunk_objectid, but that's set to a fixed value and otherwise unused. The item is constructed from a local variable before write, so we can remove the embedded one from block group. 
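The end result for the write path, condensed from the diff that follows: the on-disk item now only exists as a short-lived local, filled from the in-memory members just before it is copied into the leaf.

	struct btrfs_block_group_item bgi;

	btrfs_set_block_group_used(&bgi, cache->used);
	btrfs_set_block_group_chunk_objectid(&bgi, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_block_group_flags(&bgi, cache->flags);
	write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
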
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Reviewed-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 16 +++++----------- fs/btrfs/block-group.h | 1 - 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 52e2a05c8345..0bb1cc5f3263 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1752,8 +1752,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(bgi)); - /* Duplicate as the item is still partially used */ - memcpy(&cache->item, &bgi, sizeof(bgi)); + /* cache::chunk_objectid is unused */ cache->used = btrfs_block_group_used(&bgi); cache->flags = btrfs_block_group_flags(&bgi); if (!mixed && @@ -1879,12 +1878,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) goto next; spin_lock(&block_group->lock); - /* - * Copy partially filled item from the cache and ovewrite used - * that has the correct value - */ - memcpy(&item, &block_group->item, sizeof(item)); btrfs_set_block_group_used(&item, block_group->used); + btrfs_set_block_group_chunk_objectid(&item, + BTRFS_FIRST_CHUNK_TREE_OBJECTID); btrfs_set_block_group_flags(&item, block_group->flags); memcpy(&key, &block_group->key, sizeof(key)); spin_unlock(&block_group->lock); @@ -1919,8 +1915,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, return -ENOMEM; cache->used = bytes_used; - btrfs_set_block_group_chunk_objectid(&cache->item, - BTRFS_FIRST_CHUNK_TREE_OBJECTID); cache->flags = type; cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; @@ -2136,9 +2130,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, leaf = path->nodes[0]; bi = btrfs_item_ptr_offset(leaf, path->slots[0]); - /* Partial copy of item, update the rest from memory */ - memcpy(&bgi, &cache->item, sizeof(bgi)); btrfs_set_block_group_used(&bgi, cache->used); + btrfs_set_block_group_chunk_objectid(&bgi, + BTRFS_FIRST_CHUNK_TREE_OBJECTID); btrfs_set_block_group_flags(&bgi, cache->flags); write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); btrfs_mark_buffer_dirty(leaf); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 8fa4a70228ee..d78fce7cd3a4 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -44,7 +44,6 @@ struct btrfs_caching_control { struct btrfs_block_group_cache { struct btrfs_key key; - struct btrfs_block_group_item item; struct btrfs_fs_info *fs_info; struct inode *inode; spinlock_t lock; -- cgit v1.2.3-59-g8ed1b From de0dc456fd62d387a596508aac1c75736ef6c760 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 23 Oct 2019 18:48:18 +0200 Subject: btrfs: rename block_group_item on-stack accessors to follow naming All accessors defined by BTRFS_SETGET_STACK_FUNCS contain _stack_ in the name, the block group ones were not following that scheme, so let's switch them. 
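For context, roughly what the two accessor families look like for this item (simplified declarations; the real macro definitions live in ctree.h and also handle the endianness conversion):

/* BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, ...): plain in-memory copy */
u64 btrfs_stack_block_group_used(const struct btrfs_block_group_item *s);
void btrfs_set_stack_block_group_used(struct btrfs_block_group_item *s, u64 val);

/* BTRFS_SETGET_FUNCS(...): item still inside an extent buffer; these carry the
 * _disk_ prefix until the following patch renames them */
u64 btrfs_disk_block_group_used(struct extent_buffer *eb,
				struct btrfs_block_group_item *s);
void btrfs_set_disk_block_group_used(struct extent_buffer *eb,
				     struct btrfs_block_group_item *s, u64 val);
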
Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 18 +++++++++--------- fs/btrfs/ctree.h | 6 +++--- fs/btrfs/tree-checker.c | 10 +++++----- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 0bb1cc5f3263..3957b1817385 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1474,7 +1474,7 @@ static int find_first_block_group(struct btrfs_fs_info *fs_info, read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), sizeof(bg)); - flags = btrfs_block_group_flags(&bg) & + flags = btrfs_stack_block_group_flags(&bg) & BTRFS_BLOCK_GROUP_TYPE_MASK; if (flags != (em->map_lookup->type & @@ -1753,8 +1753,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(bgi)); /* cache::chunk_objectid is unused */ - cache->used = btrfs_block_group_used(&bgi); - cache->flags = btrfs_block_group_flags(&bgi); + cache->used = btrfs_stack_block_group_used(&bgi); + cache->flags = btrfs_stack_block_group_flags(&bgi); if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { @@ -1878,10 +1878,10 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) goto next; spin_lock(&block_group->lock); - btrfs_set_block_group_used(&item, block_group->used); - btrfs_set_block_group_chunk_objectid(&item, + btrfs_set_stack_block_group_used(&item, block_group->used); + btrfs_set_stack_block_group_chunk_objectid(&item, BTRFS_FIRST_CHUNK_TREE_OBJECTID); - btrfs_set_block_group_flags(&item, block_group->flags); + btrfs_set_stack_block_group_flags(&item, block_group->flags); memcpy(&key, &block_group->key, sizeof(key)); spin_unlock(&block_group->lock); @@ -2130,10 +2130,10 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, leaf = path->nodes[0]; bi = btrfs_item_ptr_offset(leaf, path->slots[0]); - btrfs_set_block_group_used(&bgi, cache->used); - btrfs_set_block_group_chunk_objectid(&bgi, + btrfs_set_stack_block_group_used(&bgi, cache->used); + btrfs_set_stack_block_group_chunk_objectid(&bgi, BTRFS_FIRST_CHUNK_TREE_OBJECTID); - btrfs_set_block_group_flags(&bgi, cache->flags); + btrfs_set_stack_block_group_flags(&bgi, cache->flags); write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); btrfs_mark_buffer_dirty(leaf); fail: diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4a842c0fa062..cbec0f071729 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1519,18 +1519,18 @@ static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, } /* struct btrfs_block_group_item */ -BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, +BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, struct btrfs_block_group_item, used, 64); BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item, used, 64); -BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid, +BTRFS_SETGET_STACK_FUNCS(stack_block_group_chunk_objectid, struct btrfs_block_group_item, chunk_objectid, 64); BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid, struct btrfs_block_group_item, chunk_objectid, 64); BTRFS_SETGET_FUNCS(disk_block_group_flags, struct btrfs_block_group_item, flags, 64); -BTRFS_SETGET_STACK_FUNCS(block_group_flags, +BTRFS_SETGET_STACK_FUNCS(stack_block_group_flags, struct btrfs_block_group_item, flags, 64); /* struct btrfs_free_space_info */ diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index f4751615a189..6638483e8ad8 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ 
-512,23 +512,23 @@ static int check_block_group_item(struct extent_buffer *leaf, read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), sizeof(bgi)); - if (btrfs_block_group_chunk_objectid(&bgi) != + if (btrfs_stack_block_group_chunk_objectid(&bgi) != BTRFS_FIRST_CHUNK_TREE_OBJECTID) { block_group_err(leaf, slot, "invalid block group chunk objectid, have %llu expect %llu", - btrfs_block_group_chunk_objectid(&bgi), + btrfs_stack_block_group_chunk_objectid(&bgi), BTRFS_FIRST_CHUNK_TREE_OBJECTID); return -EUCLEAN; } - if (btrfs_block_group_used(&bgi) > key->offset) { + if (btrfs_stack_block_group_used(&bgi) > key->offset) { block_group_err(leaf, slot, "invalid block group used, have %llu expect [0, %llu)", - btrfs_block_group_used(&bgi), key->offset); + btrfs_stack_block_group_used(&bgi), key->offset); return -EUCLEAN; } - flags = btrfs_block_group_flags(&bgi); + flags = btrfs_stack_block_group_flags(&bgi); if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) { block_group_err(leaf, slot, "invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set", -- cgit v1.2.3-59-g8ed1b From 0222dfdd4af1be2e70db956db61f9b43386ab76b Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 23 Oct 2019 18:48:20 +0200 Subject: btrfs: rename extent buffer block group item accessors Accessors defined by BTRFS_SETGET_FUNCS take a raw extent buffer and manipulate the items there, there's no special prefix required. The block group accessors had _disk_ because previously the names were occupied by the on-stack accessors. As this has been addressed in the previous patch, we can now unify the naming. Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Reviewed-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 6 +++--- fs/btrfs/print-tree.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index cbec0f071729..1c8f01eaf27c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1521,14 +1521,14 @@ static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, /* struct btrfs_block_group_item */ BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, struct btrfs_block_group_item, used, 64); -BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item, +BTRFS_SETGET_FUNCS(block_group_used, struct btrfs_block_group_item, used, 64); BTRFS_SETGET_STACK_FUNCS(stack_block_group_chunk_objectid, struct btrfs_block_group_item, chunk_objectid, 64); -BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid, +BTRFS_SETGET_FUNCS(block_group_chunk_objectid, struct btrfs_block_group_item, chunk_objectid, 64); -BTRFS_SETGET_FUNCS(disk_block_group_flags, +BTRFS_SETGET_FUNCS(block_group_flags, struct btrfs_block_group_item, flags, 64); BTRFS_SETGET_STACK_FUNCS(stack_block_group_flags, struct btrfs_block_group_item, flags, 64); diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 9cb50577d982..873b6b694107 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -266,9 +266,9 @@ void btrfs_print_leaf(struct extent_buffer *l) struct btrfs_block_group_item); pr_info( "\t\tblock group used %llu chunk_objectid %llu flags %llu\n", - btrfs_disk_block_group_used(l, bi), - btrfs_disk_block_group_chunk_objectid(l, bi), - btrfs_disk_block_group_flags(l, bi)); + btrfs_block_group_used(l, bi), + btrfs_block_group_chunk_objectid(l, bi), + btrfs_block_group_flags(l, bi)); break; case BTRFS_CHUNK_ITEM_KEY: print_chunk(l, btrfs_item_ptr(l, i, -- cgit v1.2.3-59-g8ed1b From b3470b5dbe1300dea94191ae4b7d070be9a5cdc9 
Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 23 Oct 2019 18:48:22 +0200 Subject: btrfs: add dedicated members for start and length of a block group The on-disk format of block group item makes use of the key that stores the offset and length. This is further used in the code, although this makes thing harder to understand. The key is also packed so the offset/length is not properly aligned as u64. Add start (key.objectid) and length (key.offset) members to block group and remove the embedded key. When the item is searched or written, a local variable for key is used. Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Reviewed-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 137 +++++++++++++++++---------------- fs/btrfs/block-group.h | 3 +- fs/btrfs/extent-tree.c | 28 +++---- fs/btrfs/free-space-cache.c | 37 +++++---- fs/btrfs/free-space-tree.c | 83 ++++++++++---------- fs/btrfs/ioctl.c | 2 +- fs/btrfs/reada.c | 4 +- fs/btrfs/relocation.c | 16 ++-- fs/btrfs/scrub.c | 8 +- fs/btrfs/space-info.c | 3 +- fs/btrfs/sysfs.c | 2 +- fs/btrfs/tests/btrfs-tests.c | 5 +- fs/btrfs/tests/free-space-tree-tests.c | 75 ++++++++---------- fs/btrfs/volumes.c | 15 ++-- include/trace/events/btrfs.h | 16 ++-- 15 files changed, 210 insertions(+), 224 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 3957b1817385..1e521db3ef56 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -162,9 +162,9 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, parent = *p; cache = rb_entry(parent, struct btrfs_block_group_cache, cache_node); - if (block_group->key.objectid < cache->key.objectid) { + if (block_group->start < cache->start) { p = &(*p)->rb_left; - } else if (block_group->key.objectid > cache->key.objectid) { + } else if (block_group->start > cache->start) { p = &(*p)->rb_right; } else { spin_unlock(&info->block_group_cache_lock); @@ -176,8 +176,8 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, rb_insert_color(&block_group->cache_node, &info->block_group_cache_tree); - if (info->first_logical_byte > block_group->key.objectid) - info->first_logical_byte = block_group->key.objectid; + if (info->first_logical_byte > block_group->start) + info->first_logical_byte = block_group->start; spin_unlock(&info->block_group_cache_lock); @@ -201,11 +201,11 @@ static struct btrfs_block_group_cache *block_group_cache_tree_search( while (n) { cache = rb_entry(n, struct btrfs_block_group_cache, cache_node); - end = cache->key.objectid + cache->key.offset - 1; - start = cache->key.objectid; + end = cache->start + cache->length - 1; + start = cache->start; if (bytenr < start) { - if (!contains && (!ret || start < ret->key.objectid)) + if (!contains && (!ret || start < ret->start)) ret = cache; n = n->rb_left; } else if (bytenr > start) { @@ -221,8 +221,8 @@ static struct btrfs_block_group_cache *block_group_cache_tree_search( } if (ret) { btrfs_get_block_group(ret); - if (bytenr == 0 && info->first_logical_byte > ret->key.objectid) - info->first_logical_byte = ret->key.objectid; + if (bytenr == 0 && info->first_logical_byte > ret->start) + info->first_logical_byte = ret->start; } spin_unlock(&info->block_group_cache_lock); @@ -257,7 +257,7 @@ struct btrfs_block_group_cache *btrfs_next_block_group( /* If our block group was removed, we need a full search. 
*/ if (RB_EMPTY_NODE(&cache->cache_node)) { - const u64 next_bytenr = cache->key.objectid + cache->key.offset; + const u64 next_bytenr = cache->start + cache->length; spin_unlock(&fs_info->block_group_cache_lock); btrfs_put_block_group(cache); @@ -427,8 +427,8 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) static void fragment_free_space(struct btrfs_block_group_cache *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; - u64 start = block_group->key.objectid; - u64 len = block_group->key.offset; + u64 start = block_group->start; + u64 len = block_group->length; u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? fs_info->nodesize : fs_info->sectorsize; u64 step = chunk << 1; @@ -507,7 +507,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) if (!path) return -ENOMEM; - last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); + last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET); #ifdef CONFIG_BTRFS_DEBUG /* @@ -587,13 +587,12 @@ next: goto next; } - if (key.objectid < block_group->key.objectid) { + if (key.objectid < block_group->start) { path->slots[0]++; continue; } - if (key.objectid >= block_group->key.objectid + - block_group->key.offset) + if (key.objectid >= block_group->start + block_group->length) break; if (key.type == BTRFS_EXTENT_ITEM_KEY || @@ -617,8 +616,7 @@ next: ret = 0; total_found += add_new_free_space(block_group, last, - block_group->key.objectid + - block_group->key.offset); + block_group->start + block_group->length); caching_ctl->progress = (u64)-1; out: @@ -656,7 +654,7 @@ static noinline void caching_thread(struct btrfs_work *work) spin_lock(&block_group->space_info->lock); spin_lock(&block_group->lock); - bytes_used = block_group->key.offset - block_group->used; + bytes_used = block_group->length - block_group->used; block_group->space_info->bytes_used += bytes_used >> 1; spin_unlock(&block_group->lock); spin_unlock(&block_group->space_info->lock); @@ -692,7 +690,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, mutex_init(&caching_ctl->mutex); init_waitqueue_head(&caching_ctl->wait); caching_ctl->block_group = cache; - caching_ctl->progress = cache->key.objectid; + caching_ctl->progress = cache->start; refcount_set(&caching_ctl->count, 1); btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); @@ -761,7 +759,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); - bytes_used = cache->key.offset - cache->used; + bytes_used = cache->length - cache->used; cache->space_info->bytes_used += bytes_used >> 1; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); @@ -883,10 +881,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, * remove it. 
*/ btrfs_free_excluded_extents(block_group); - btrfs_free_ref_tree_range(fs_info, block_group->key.objectid, - block_group->key.offset); + btrfs_free_ref_tree_range(fs_info, block_group->start, + block_group->length); - memcpy(&key, &block_group->key, sizeof(key)); index = btrfs_bg_flags_to_raid_index(block_group->flags); factor = btrfs_bg_type_to_factor(block_group->flags); @@ -964,8 +961,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, } key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; key.type = 0; + key.offset = block_group->start; ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); if (ret < 0) @@ -984,7 +981,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, &fs_info->block_group_cache_tree); RB_CLEAR_NODE(&block_group->cache_node); - if (fs_info->first_logical_byte == block_group->key.objectid) + if (fs_info->first_logical_byte == block_group->start) fs_info->first_logical_byte = (u64)-1; spin_unlock(&fs_info->block_group_cache_lock); @@ -1045,19 +1042,21 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { WARN_ON(block_group->space_info->total_bytes - < block_group->key.offset); + < block_group->length); WARN_ON(block_group->space_info->bytes_readonly - < block_group->key.offset); + < block_group->length); WARN_ON(block_group->space_info->disk_total - < block_group->key.offset * factor); + < block_group->length * factor); } - block_group->space_info->total_bytes -= block_group->key.offset; - block_group->space_info->bytes_readonly -= block_group->key.offset; - block_group->space_info->disk_total -= block_group->key.offset * factor; + block_group->space_info->total_bytes -= block_group->length; + block_group->space_info->bytes_readonly -= block_group->length; + block_group->space_info->disk_total -= block_group->length * factor; spin_unlock(&block_group->space_info->lock); - memcpy(&key, &block_group->key, sizeof(key)); + key.objectid = block_group->start; + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + key.offset = block_group->length; mutex_lock(&fs_info->chunk_mutex); spin_lock(&block_group->lock); @@ -1206,7 +1205,7 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) goto out; } - num_bytes = cache->key.offset - cache->reserved - cache->pinned - + num_bytes = cache->length - cache->reserved - cache->pinned - cache->bytes_super - cache->used; sinfo_used = btrfs_space_info_used(sinfo, true); @@ -1228,8 +1227,7 @@ out: spin_unlock(&sinfo->lock); if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { btrfs_info(cache->fs_info, - "unable to make block group %llu ro", - cache->key.objectid); + "unable to make block group %llu ro", cache->start); btrfs_info(cache->fs_info, "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu", sinfo_used, num_bytes, min_allocable_bytes); @@ -1304,7 +1302,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * properly if we fail to join the transaction. */ trans = btrfs_start_trans_remove_block_group(fs_info, - block_group->key.objectid); + block_group->start); if (IS_ERR(trans)) { btrfs_dec_block_group_ro(block_group); ret = PTR_ERR(trans); @@ -1315,8 +1313,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * We could have pending pinned extents for this block group, * just delete them, we don't care about them anymore. 
*/ - start = block_group->key.objectid; - end = start + block_group->key.offset - 1; + start = block_group->start; + end = start + block_group->length - 1; /* * Hold the unused_bg_unpin_mutex lock to avoid racing with * btrfs_finish_extent_commit(). If we are at transaction N, @@ -1371,7 +1369,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * Btrfs_remove_chunk will abort the transaction if things go * horribly wrong. */ - ret = btrfs_remove_chunk(trans, block_group->key.objectid); + ret = btrfs_remove_chunk(trans, block_group->start); if (ret) { if (trimming) @@ -1522,10 +1520,10 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) int stripe_len; int i, nr, ret; - if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) { - stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid; + if (cache->start < BTRFS_SUPER_INFO_OFFSET) { + stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; cache->bytes_super += stripe_len; - ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid, + ret = btrfs_add_excluded_extent(fs_info, cache->start, stripe_len); if (ret) return ret; @@ -1533,7 +1531,7 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { bytenr = btrfs_sb_offset(i); - ret = btrfs_rmap_block(fs_info, cache->key.objectid, + ret = btrfs_rmap_block(fs_info, cache->start, bytenr, &logical, &nr, &stripe_len); if (ret) return ret; @@ -1541,21 +1539,19 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) while (nr--) { u64 start, len; - if (logical[nr] > cache->key.objectid + - cache->key.offset) + if (logical[nr] > cache->start + cache->length) continue; - if (logical[nr] + stripe_len <= cache->key.objectid) + if (logical[nr] + stripe_len <= cache->start) continue; start = logical[nr]; - if (start < cache->key.objectid) { - start = cache->key.objectid; + if (start < cache->start) { + start = cache->start; len = (logical[nr] + stripe_len) - start; } else { len = min_t(u64, stripe_len, - cache->key.objectid + - cache->key.offset - start); + cache->start + cache->length - start); } cache->bytes_super += len; @@ -1603,9 +1599,8 @@ static struct btrfs_block_group_cache *btrfs_create_block_group_cache( return NULL; } - cache->key.objectid = start; - cache->key.offset = size; - cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + cache->start = start; + cache->length = size; cache->fs_info = fs_info; cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); @@ -1661,15 +1656,14 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) free_extent_map(em); break; } - if (bg->key.objectid != em->start || - bg->key.offset != em->len || + if (bg->start != em->start || bg->length != em->len || (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { btrfs_err(fs_info, "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", em->start, em->len, em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, - bg->key.objectid, bg->key.offset, + bg->start, bg->length, bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); ret = -EUCLEAN; free_extent_map(em); @@ -1760,7 +1754,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { btrfs_err(info, "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", - cache->key.objectid); + cache->start); btrfs_put_block_group(cache); ret = -EINVAL; goto error; @@ -1822,7 +1816,7 @@ int 
btrfs_read_block_groups(struct btrfs_fs_info *info) link_block_group(cache); set_avail_alloc_bits(info, cache->flags); - if (btrfs_chunk_readonly(info, cache->key.objectid)) { + if (btrfs_chunk_readonly(info, cache->start)) { inc_block_group_ro(cache, 1); } else if (cache->used == 0) { ASSERT(list_empty(&cache->bg_list)); @@ -1882,7 +1876,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) btrfs_set_stack_block_group_chunk_objectid(&item, BTRFS_FIRST_CHUNK_TREE_OBJECTID); btrfs_set_stack_block_group_flags(&item, block_group->flags); - memcpy(&key, &block_group->key, sizeof(key)); + key.objectid = block_group->start; + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + key.offset = block_group->length; spin_unlock(&block_group->lock); ret = btrfs_insert_item(trans, extent_root, &key, &item, @@ -2101,7 +2097,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) spin_lock(&sinfo->lock); spin_lock(&cache->lock); if (!--cache->ro) { - num_bytes = cache->key.offset - cache->reserved - + num_bytes = cache->length - cache->reserved - cache->pinned - cache->bytes_super - cache->used; sinfo->bytes_readonly -= num_bytes; list_del_init(&cache->ro_list); @@ -2120,8 +2116,13 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, unsigned long bi; struct extent_buffer *leaf; struct btrfs_block_group_item bgi; + struct btrfs_key key; + + key.objectid = cache->start; + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + key.offset = cache->length; - ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); + ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1); if (ret) { if (ret > 0) ret = -ENOENT; @@ -2160,7 +2161,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, * If this block group is smaller than 100 megs don't bother caching the * block group. */ - if (block_group->key.offset < (100 * SZ_1M)) { + if (block_group->length < (100 * SZ_1M)) { spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_WRITTEN; spin_unlock(&block_group->lock); @@ -2261,7 +2262,7 @@ again: * taking up quite a bit since it's not folded into the other space * cache. 
*/ - num_pages = div_u64(block_group->key.offset, SZ_256M); + num_pages = div_u64(block_group->length, SZ_256M); if (!num_pages) num_pages = 1; @@ -2668,8 +2669,8 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, if (!alloc && !btrfs_block_group_cache_done(cache)) btrfs_cache_block_group(cache, 1); - byte_in_group = bytenr - cache->key.objectid; - WARN_ON(byte_in_group > cache->key.offset); + byte_in_group = bytenr - cache->start; + WARN_ON(byte_in_group > cache->length); spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); @@ -2679,7 +2680,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, cache->disk_cache_state = BTRFS_DC_CLEAR; old_val = cache->used; - num_bytes = min(total, cache->key.offset - byte_in_group); + num_bytes = min(total, cache->length - byte_in_group); if (alloc) { old_val += num_bytes; cache->used = old_val; @@ -3076,7 +3077,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info) spin_unlock(&block_group->lock); ASSERT(block_group->io_ctl.inode == NULL); iput(inode); - last = block_group->key.objectid + block_group->key.offset; + last = block_group->start + block_group->length; btrfs_put_block_group(block_group); } } diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index d78fce7cd3a4..2ea580352aff 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -43,10 +43,11 @@ struct btrfs_caching_control { #define CACHING_CTL_WAKE_UP SZ_2M struct btrfs_block_group_cache { - struct btrfs_key key; struct btrfs_fs_info *fs_info; struct inode *inode; spinlock_t lock; + u64 start; + u64 length; u64 pinned; u64 reserved; u64 used; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e3fd4b0ca905..b24d116eaf0b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -75,8 +75,8 @@ void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache) struct btrfs_fs_info *fs_info = cache->fs_info; u64 start, end; - start = cache->key.objectid; - end = start + cache->key.offset - 1; + start = cache->start; + end = start + cache->length - 1; clear_extent_bits(&fs_info->freed_extents[0], start, end, EXTENT_UPTODATE); @@ -2560,7 +2560,7 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start) if (!cache) return 0; - bytenr = cache->key.objectid; + bytenr = cache->start; btrfs_put_block_group(cache); return bytenr; @@ -2796,7 +2796,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, while (start <= end) { readonly = false; if (!cache || - start >= cache->key.objectid + cache->key.offset) { + start >= cache->start + cache->length) { if (cache) btrfs_put_block_group(cache); total_unpinned = 0; @@ -2809,7 +2809,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, empty_cluster <<= 1; } - len = cache->key.objectid + cache->key.offset - start; + len = cache->start + cache->length - start; len = min(len, end + 1 - start); if (start < cache->last_byte_to_unpin) { @@ -2925,8 +2925,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) ret = -EROFS; if (!trans->aborted) ret = btrfs_discard_extent(fs_info, - block_group->key.objectid, - block_group->key.offset, + block_group->start, + block_group->length, &trimmed); list_del_init(&block_group->bg_list); @@ -3492,7 +3492,7 @@ static int find_free_extent_clustered(struct btrfs_block_group_cache *bg, goto release_cluster; offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr, - ffe_ctl->num_bytes, cluster_bg->key.objectid, + ffe_ctl->num_bytes, cluster_bg->start, 
&ffe_ctl->max_extent_size); if (offset) { /* We have a block, we're done */ @@ -3903,7 +3903,7 @@ search: continue; btrfs_grab_block_group(block_group, delalloc); - ffe_ctl.search_start = block_group->key.objectid; + ffe_ctl.search_start = block_group->start; /* * this can happen if we end up cycling through all the @@ -3983,7 +3983,7 @@ checks: /* move on to the next group */ if (ffe_ctl.search_start + num_bytes > - block_group->key.objectid + block_group->key.offset) { + block_group->start + block_group->length) { btrfs_add_free_space(block_group, ffe_ctl.found_offset, num_bytes); goto loop; @@ -5497,7 +5497,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) } factor = btrfs_bg_type_to_factor(block_group->flags); - free_bytes += (block_group->key.offset - + free_bytes += (block_group->length - block_group->used) * factor; spin_unlock(&block_group->lock); @@ -5645,13 +5645,13 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) cache = btrfs_lookup_first_block_group(fs_info, range->start); for (; cache; cache = btrfs_next_block_group(cache)) { - if (cache->key.objectid >= range_end) { + if (cache->start >= range_end) { btrfs_put_block_group(cache); break; } - start = max(range->start, cache->key.objectid); - end = min(range_end, cache->key.objectid + cache->key.offset); + start = max(range->start, cache->start); + end = min(range_end, cache->start + cache->length); if (end - start >= range->minlen) { if (!btrfs_block_group_cache_done(cache)) { diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index e4ea277d4e01..279c41c4ba50 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -107,7 +107,7 @@ struct inode *lookup_free_space_inode( return inode; inode = __lookup_free_space_inode(fs_info->tree_root, path, - block_group->key.objectid); + block_group->start); if (IS_ERR(inode)) return inode; @@ -201,7 +201,7 @@ int create_free_space_inode(struct btrfs_trans_handle *trans, return ret; return __create_free_space_inode(trans->fs_info->tree_root, trans, path, - ino, block_group->key.objectid); + ino, block_group->start); } int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, @@ -882,13 +882,13 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group) spin_unlock(&block_group->lock); ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, - path, block_group->key.objectid); + path, block_group->start); btrfs_free_path(path); if (ret <= 0) goto out; spin_lock(&ctl->tree_lock); - matched = (ctl->free_space == (block_group->key.offset - used - + matched = (ctl->free_space == (block_group->length - used - block_group->bytes_super)); spin_unlock(&ctl->tree_lock); @@ -896,7 +896,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group) __btrfs_remove_free_space_cache(ctl); btrfs_warn(fs_info, "block group %llu has wrong amount of free space", - block_group->key.objectid); + block_group->start); ret = -1; } out: @@ -909,7 +909,7 @@ out: btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now", - block_group->key.objectid); + block_group->start); } iput(inode); @@ -1067,9 +1067,9 @@ static noinline_for_stack int write_pinned_extent_entries( */ unpin = block_group->fs_info->pinned_extents; - start = block_group->key.objectid; + start = block_group->start; - while (start < block_group->key.objectid + block_group->key.offset) { + while (start < block_group->start + block_group->length) { ret = 
find_first_extent_bit(unpin, start, &extent_start, &extent_end, EXTENT_DIRTY, NULL); @@ -1077,13 +1077,12 @@ static noinline_for_stack int write_pinned_extent_entries( return 0; /* This pinned extent is out of our range */ - if (extent_start >= block_group->key.objectid + - block_group->key.offset) + if (extent_start >= block_group->start + block_group->length) return 0; extent_start = max(extent_start, start); - extent_end = min(block_group->key.objectid + - block_group->key.offset, extent_end + 1); + extent_end = min(block_group->start + block_group->length, + extent_end + 1); len = extent_end - extent_start; *entries += 1; @@ -1174,7 +1173,7 @@ out: #ifdef DEBUG btrfs_err(root->fs_info, "failed to write free space cache for block group %llu", - block_group->key.objectid); + block_group->start); #endif } } @@ -1221,7 +1220,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans, { return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans, block_group, &block_group->io_ctl, - path, block_group->key.objectid); + path, block_group->start); } /** @@ -1400,7 +1399,7 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans, #ifdef DEBUG btrfs_err(fs_info, "failed to write free space cache for block group %llu", - block_group->key.objectid); + block_group->start); #endif spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_ERROR; @@ -1657,7 +1656,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; - u64 size = block_group->key.offset; + u64 size = block_group->length; u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); @@ -2034,7 +2033,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl, * so allow those block groups to still be allowed to have a bitmap * entry. */ - if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset) + if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length) return false; return true; @@ -2516,7 +2515,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) spin_lock_init(&ctl->tree_lock); ctl->unit = fs_info->sectorsize; - ctl->start = block_group->key.objectid; + ctl->start = block_group->start; ctl->private = block_group; ctl->op = &free_space_op; INIT_LIST_HEAD(&ctl->trimming_ranges); @@ -3379,7 +3378,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group) mutex_lock(&fs_info->chunk_mutex); em_tree = &fs_info->mapping_tree; write_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, block_group->key.objectid, + em = lookup_extent_mapping(em_tree, block_group->start, 1); BUG_ON(!em); /* logic error, can't happen */ remove_extent_mapping(em_tree, em); diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 48a03f5240f5..d3aa65a4c76f 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -27,8 +27,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache) * exceeds that required for using bitmaps. 
*/ bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS; - num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1, - bitmap_range); + num_bitmaps = div_u64(cache->length + bitmap_range - 1, bitmap_range); bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE; total_bitmap_size = num_bitmaps * bitmap_size; cache->bitmap_high_thresh = div_u64(total_bitmap_size, @@ -54,9 +53,9 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; int ret; - key.objectid = block_group->key.objectid; + key.objectid = block_group->start; key.type = BTRFS_FREE_SPACE_INFO_KEY; - key.offset = block_group->key.offset; + key.offset = block_group->length; ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info)); if (ret) @@ -86,16 +85,16 @@ struct btrfs_free_space_info *search_free_space_info( struct btrfs_key key; int ret; - key.objectid = block_group->key.objectid; + key.objectid = block_group->start; key.type = BTRFS_FREE_SPACE_INFO_KEY; - key.offset = block_group->key.offset; + key.offset = block_group->length; ret = btrfs_search_slot(trans, root, &key, path, 0, cow); if (ret < 0) return ERR_PTR(ret); if (ret != 0) { btrfs_warn(fs_info, "missing free space info for %llu", - block_group->key.objectid); + block_group->start); ASSERT(0); return ERR_PTR(-ENOENT); } @@ -197,7 +196,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, int done = 0, nr; int ret; - bitmap_size = free_space_bitmap_size(block_group->key.offset, + bitmap_size = free_space_bitmap_size(block_group->length, fs_info->sectorsize); bitmap = alloc_bitmap(bitmap_size); if (!bitmap) { @@ -205,8 +204,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, goto out; } - start = block_group->key.objectid; - end = block_group->key.objectid + block_group->key.offset; + start = block_group->start; + end = block_group->start + block_group->length; key.objectid = end - 1; key.type = (u8)-1; @@ -224,8 +223,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) { - ASSERT(found_key.objectid == block_group->key.objectid); - ASSERT(found_key.offset == block_group->key.offset); + ASSERT(found_key.objectid == block_group->start); + ASSERT(found_key.offset == block_group->length); done = 1; break; } else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) { @@ -271,7 +270,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", - block_group->key.objectid, extent_count, + block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; @@ -336,7 +335,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, int done = 0, nr; int ret; - bitmap_size = free_space_bitmap_size(block_group->key.offset, + bitmap_size = free_space_bitmap_size(block_group->length, fs_info->sectorsize); bitmap = alloc_bitmap(bitmap_size); if (!bitmap) { @@ -344,8 +343,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, goto out; } - start = block_group->key.objectid; - end = block_group->key.objectid + block_group->key.offset; + start = block_group->start; + end = block_group->start + block_group->length; key.objectid = end - 1; key.type = (u8)-1; @@ -363,8 +362,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, 
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) { - ASSERT(found_key.objectid == block_group->key.objectid); - ASSERT(found_key.offset == block_group->key.offset); + ASSERT(found_key.objectid == block_group->start); + ASSERT(found_key.offset == block_group->length); done = 1; break; } else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) { @@ -413,7 +412,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); - nrbits = div_u64(block_group->key.offset, block_group->fs_info->sectorsize); + nrbits = div_u64(block_group->length, block_group->fs_info->sectorsize); start_bit = find_next_bit_le(bitmap, nrbits, 0); while (start_bit < nrbits) { @@ -437,7 +436,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", - block_group->key.objectid, extent_count, + block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; @@ -597,7 +596,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, * Read the bit for the block immediately before the extent of space if * that block is within the block group. */ - if (start > block_group->key.objectid) { + if (start > block_group->start) { u64 prev_block = start - block_group->fs_info->sectorsize; key.objectid = prev_block; @@ -649,7 +648,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, * Read the bit for the block immediately after the extent of space if * that block is within the block group. */ - if (end < block_group->key.objectid + block_group->key.offset) { + if (end < block_group->start + block_group->length) { /* The next block may be in the next bitmap. */ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (end >= key.objectid + key.offset) { @@ -880,7 +879,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans, new_key.offset = size; /* Search for a neighbor on the left. */ - if (start == block_group->key.objectid) + if (start == block_group->start) goto right; key.objectid = start - 1; key.type = (u8)-1; @@ -900,8 +899,8 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans, found_start = key.objectid; found_end = key.objectid + key.offset; - ASSERT(found_start >= block_group->key.objectid && - found_end > block_group->key.objectid); + ASSERT(found_start >= block_group->start && + found_end > block_group->start); ASSERT(found_start < start && found_end <= start); /* @@ -920,7 +919,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans, right: /* Search for a neighbor on the right. */ - if (end == block_group->key.objectid + block_group->key.offset) + if (end == block_group->start + block_group->length) goto insert; key.objectid = end; key.type = (u8)-1; @@ -940,8 +939,8 @@ right: found_start = key.objectid; found_end = key.objectid + key.offset; - ASSERT(found_start >= block_group->key.objectid && - found_end > block_group->key.objectid); + ASSERT(found_start >= block_group->start && + found_end > block_group->start); ASSERT((found_start < start && found_end <= start) || (found_start >= end && found_end > end)); @@ -1075,7 +1074,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's * contained in. 
*/ - key.objectid = block_group->key.objectid; + key.objectid = block_group->start; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = 0; @@ -1084,8 +1083,8 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, goto out_locked; ASSERT(ret == 0); - start = block_group->key.objectid; - end = block_group->key.objectid + block_group->key.offset; + start = block_group->start; + end = block_group->start + block_group->length; while (1) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); @@ -1109,7 +1108,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, else start += key.offset; } else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { - if (key.objectid != block_group->key.objectid) + if (key.objectid != block_group->start) break; } @@ -1277,8 +1276,8 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans, return ret; return __add_to_free_space_tree(trans, block_group, path, - block_group->key.objectid, - block_group->key.offset); + block_group->start, + block_group->length); } int add_block_group_free_space(struct btrfs_trans_handle *trans, @@ -1336,8 +1335,8 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans, goto out; } - start = block_group->key.objectid; - end = block_group->key.objectid + block_group->key.offset; + start = block_group->start; + end = block_group->start + block_group->length; key.objectid = end - 1; key.type = (u8)-1; @@ -1355,8 +1354,8 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) { - ASSERT(found_key.objectid == block_group->key.objectid); - ASSERT(found_key.offset == block_group->key.offset); + ASSERT(found_key.objectid == block_group->start); + ASSERT(found_key.offset == block_group->length); done = 1; nr++; path->slots[0]--; @@ -1407,7 +1406,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, fs_info = block_group->fs_info; root = fs_info->free_space_root; - end = block_group->key.objectid + block_group->key.offset; + end = block_group->start + block_group->length; while (1) { ret = btrfs_next_item(root, path); @@ -1454,7 +1453,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", - block_group->key.objectid, extent_count, + block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; @@ -1485,7 +1484,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, fs_info = block_group->fs_info; root = fs_info->free_space_root; - end = block_group->key.objectid + block_group->key.offset; + end = block_group->start + block_group->length; while (1) { ret = btrfs_next_item(root, path); @@ -1516,7 +1515,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", - block_group->key.objectid, extent_count, + block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4f92241ea28e..4cf255830bc5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -4038,7 +4038,7 @@ static void get_block_group_info(struct list_head *groups_list, space->flags = 0; list_for_each_entry(block_group, groups_list, list) { space->flags = 
block_group->flags; - space->total_bytes += block_group->key.offset; + space->total_bytes += block_group->length; space->used_bytes += block_group->used; } } diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 1feaeadc8cf5..907c5d79a197 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -248,8 +248,8 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical, if (!cache) return NULL; - start = cache->key.objectid; - end = start + cache->key.offset - 1; + start = cache->start; + end = start + cache->length - 1; btrfs_put_block_group(cache); zone = kzalloc(sizeof(*zone), GFP_KERNEL); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 23e9232e1a81..231aedd0ec52 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1563,8 +1563,8 @@ again: static int in_block_group(u64 bytenr, struct btrfs_block_group_cache *block_group) { - if (bytenr >= block_group->key.objectid && - bytenr < block_group->key.objectid + block_group->key.offset) + if (bytenr >= block_group->start && + bytenr < block_group->start + block_group->length) return 1; return 0; } @@ -3863,7 +3863,7 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, u64 start, end, last; int ret; - last = rc->block_group->key.objectid + rc->block_group->key.offset; + last = rc->block_group->start + rc->block_group->length; while (1) { cond_resched(); if (rc->search_start >= last) { @@ -3980,7 +3980,7 @@ int prepare_to_relocate(struct reloc_control *rc) return -ENOMEM; memset(&rc->cluster, 0, sizeof(rc->cluster)); - rc->search_start = rc->block_group->key.objectid; + rc->search_start = rc->block_group->start; rc->extents_found = 0; rc->nodes_relocated = 0; rc->merging_rsv_size = 0; @@ -4248,7 +4248,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, key.offset = 0; inode = btrfs_iget(fs_info->sb, &key, root); BUG_ON(IS_ERR(inode)); - BTRFS_I(inode)->index_cnt = group->key.objectid; + BTRFS_I(inode)->index_cnt = group->start; err = btrfs_orphan_add(trans, BTRFS_I(inode)); out: @@ -4291,7 +4291,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info, btrfs_info(fs_info, "relocating block group %llu flags %s", - block_group->key.objectid, buf); + block_group->start, buf); } /* @@ -4364,8 +4364,8 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) btrfs_wait_block_group_reservations(rc->block_group); btrfs_wait_nocow_writers(rc->block_group); btrfs_wait_ordered_roots(fs_info, U64_MAX, - rc->block_group->key.objectid, - rc->block_group->key.offset); + rc->block_group->start, + rc->block_group->length); while (1) { mutex_lock(&fs_info->cleaner_mutex); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 00313a182036..4a5a4e4ef707 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -404,8 +404,8 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, * round_down() can only handle power of 2, while RAID56 full * stripe length can be 64KiB * n, so we need to manually round down. 
*/ - ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) * - cache->full_stripe_len + cache->key.objectid; + ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) * + cache->full_stripe_len + cache->start; return ret; } @@ -3583,8 +3583,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, btrfs_wait_block_group_reservations(cache); btrfs_wait_nocow_writers(cache); ret = btrfs_wait_ordered_roots(fs_info, U64_MAX, - cache->key.objectid, - cache->key.offset); + cache->start, + cache->length); if (ret > 0) { struct btrfs_trans_handle *trans; diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 749b0be58e2c..3a2b0d1155a8 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -301,8 +301,7 @@ again: spin_lock(&cache->lock); btrfs_info(fs_info, "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s", - cache->key.objectid, cache->key.offset, - cache->used, cache->pinned, + cache->start, cache->length, cache->used, cache->pinned, cache->reserved, cache->ro ? "[readonly]" : ""); btrfs_dump_free_space(cache, bytes); spin_unlock(&cache->lock); diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 8e2763380e56..786f4e4c6542 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -402,7 +402,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj, down_read(&sinfo->groups_sem); list_for_each_entry(block_group, &sinfo->block_groups[index], list) { if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes)) - val += block_group->key.offset; + val += block_group->length; else val += block_group->used; } diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 99fe9bf3fdac..f4bb5e2a4ba5 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -218,9 +218,8 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, return NULL; } - cache->key.objectid = 0; - cache->key.offset = length; - cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + cache->start = 0; + cache->length = length; cache->full_stripe_len = fs_info->sectorsize; cache->fs_info = fs_info; diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index bc92df977630..188f08bd44b0 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c @@ -48,7 +48,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans, if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) { if (path->slots[0] != 0) goto invalid; - end = cache->key.objectid + cache->key.offset; + end = cache->start + cache->length; i = 0; while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); @@ -155,7 +155,7 @@ static int test_empty_block_group(struct btrfs_trans_handle *trans, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, cache->key.offset}, + {cache->start, cache->length}, }; return check_free_space_extents(trans, fs_info, cache, path, @@ -172,8 +172,8 @@ static int test_remove_all(struct btrfs_trans_handle *trans, int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, - cache->key.offset); + cache->start, + cache->length); if (ret) { test_err("could not remove free space"); return ret; @@ -190,13 +190,12 @@ static int test_remove_beginning(struct btrfs_trans_handle *trans, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid + alignment, - cache->key.offset - alignment}, + {cache->start + alignment, cache->length - 
alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, alignment); + cache->start, alignment); if (ret) { test_err("could not remove free space"); return ret; @@ -214,14 +213,13 @@ static int test_remove_end(struct btrfs_trans_handle *trans, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, cache->key.offset - alignment}, + {cache->start, cache->length - alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid + - cache->key.offset - alignment, - alignment); + cache->start + cache->length - alignment, + alignment); if (ret) { test_err("could not remove free space"); return ret; @@ -238,14 +236,13 @@ static int test_remove_middle(struct btrfs_trans_handle *trans, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, alignment}, - {cache->key.objectid + 2 * alignment, - cache->key.offset - 2 * alignment}, + {cache->start, alignment}, + {cache->start + 2 * alignment, cache->length - 2 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid + alignment, + cache->start + alignment, alignment); if (ret) { test_err("could not remove free space"); @@ -263,19 +260,18 @@ static int test_merge_left(struct btrfs_trans_handle *trans, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, 2 * alignment}, + {cache->start, 2 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, - cache->key.offset); + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid, + ret = __add_to_free_space_tree(trans, cache, path, cache->start, alignment); if (ret) { test_err("could not add free space"); @@ -283,7 +279,7 @@ static int test_merge_left(struct btrfs_trans_handle *trans, } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + alignment, + cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); @@ -301,20 +297,19 @@ static int test_merge_right(struct btrfs_trans_handle *trans, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid + alignment, 2 * alignment}, + {cache->start + alignment, 2 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, - cache->key.offset); + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + 2 * alignment, + cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); @@ -322,7 +317,7 @@ static int test_merge_right(struct btrfs_trans_handle *trans, } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + alignment, + cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); @@ -340,19 +335,18 @@ static int test_merge_both(struct btrfs_trans_handle *trans, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, 3 * alignment}, + {cache->start, 3 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, - cache->key.offset); + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid, + 
ret = __add_to_free_space_tree(trans, cache, path, cache->start, alignment); if (ret) { test_err("could not add free space"); @@ -360,16 +354,14 @@ static int test_merge_both(struct btrfs_trans_handle *trans, } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + 2 * alignment, - alignment); + cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + alignment, - alignment); + cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); return ret; @@ -386,21 +378,20 @@ static int test_merge_none(struct btrfs_trans_handle *trans, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, alignment}, - {cache->key.objectid + 2 * alignment, alignment}, - {cache->key.objectid + 4 * alignment, alignment}, + {cache->start, alignment}, + {cache->start + 2 * alignment, alignment}, + {cache->start + 4 * alignment, alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, - cache->key.offset); + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid, + ret = __add_to_free_space_tree(trans, cache, path, cache->start, alignment); if (ret) { test_err("could not add free space"); @@ -408,16 +399,14 @@ static int test_merge_none(struct btrfs_trans_handle *trans, } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + 4 * alignment, - alignment); + cache->start + 4 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + 2 * alignment, - alignment); + cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 2a0637342f08..f534a6a5553e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3194,16 +3194,16 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off if (bargs->usage_min == 0) user_thresh_min = 0; else - user_thresh_min = div_factor_fine(cache->key.offset, - bargs->usage_min); + user_thresh_min = div_factor_fine(cache->length, + bargs->usage_min); if (bargs->usage_max == 0) user_thresh_max = 1; else if (bargs->usage_max > 100) - user_thresh_max = cache->key.offset; + user_thresh_max = cache->length; else - user_thresh_max = div_factor_fine(cache->key.offset, - bargs->usage_max); + user_thresh_max = div_factor_fine(cache->length, + bargs->usage_max); if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) ret = 0; @@ -3225,10 +3225,9 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, if (bargs->usage_min == 0) user_thresh = 1; else if (bargs->usage > 100) - user_thresh = cache->key.offset; + user_thresh = cache->length; else - user_thresh = div_factor_fine(cache->key.offset, - bargs->usage); + user_thresh = div_factor_fine(cache->length, bargs->usage); if (chunk_used < user_thresh) ret = 0; diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 7b842b049ea3..070619891915 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -713,8 +713,8 @@ TRACE_EVENT(btrfs_add_block_group, ), TP_fast_assign_btrfs(fs_info, - __entry->offset = block_group->key.objectid; - __entry->size = block_group->key.offset; + __entry->offset 
= block_group->start; + __entry->size = block_group->length; __entry->flags = block_group->flags; __entry->bytes_used = block_group->used; __entry->bytes_super = block_group->bytes_super; @@ -1197,7 +1197,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent, ), TP_fast_assign_btrfs(block_group->fs_info, - __entry->bg_objectid = block_group->key.objectid; + __entry->bg_objectid = block_group->start; __entry->flags = block_group->flags; __entry->start = start; __entry->len = len; @@ -1245,7 +1245,7 @@ TRACE_EVENT(btrfs_find_cluster, ), TP_fast_assign_btrfs(block_group->fs_info, - __entry->bg_objectid = block_group->key.objectid; + __entry->bg_objectid = block_group->start; __entry->flags = block_group->flags; __entry->start = start; __entry->bytes = bytes; @@ -1272,7 +1272,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup, ), TP_fast_assign_btrfs(block_group->fs_info, - __entry->bg_objectid = block_group->key.objectid; + __entry->bg_objectid = block_group->start; ), TP_printk_btrfs("block_group=%llu", __entry->bg_objectid) @@ -1296,7 +1296,7 @@ TRACE_EVENT(btrfs_setup_cluster, ), TP_fast_assign_btrfs(block_group->fs_info, - __entry->bg_objectid = block_group->key.objectid; + __entry->bg_objectid = block_group->start; __entry->flags = block_group->flags; __entry->start = cluster->window_start; __entry->max_size = cluster->max_size; @@ -1856,8 +1856,8 @@ DECLARE_EVENT_CLASS(btrfs__block_group, ), TP_fast_assign_btrfs(bg_cache->fs_info, - __entry->bytenr = bg_cache->key.objectid, - __entry->len = bg_cache->key.offset, + __entry->bytenr = bg_cache->start, + __entry->len = bg_cache->length, __entry->used = bg_cache->used; __entry->flags = bg_cache->flags; ), -- cgit v1.2.3-59-g8ed1b From f5389f330d6f28d648c51624f059b0bfdd9b1806 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Thu, 24 Oct 2019 17:44:54 +0200 Subject: btrfs: remove cached space_info in btrfs_statfs() In btrfs_statfs() we cache fs_info::space_info in a local variable only to use it once in a list_for_each_rcu() statement. Not only is the local variable unnecessary it even makes the code harder to follow as it's not clear which list it is iterating. Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/super.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 792bb0bc9a0b..3f49407cc2aa 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2021,7 +2021,6 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); struct btrfs_super_block *disk_super = fs_info->super_copy; - struct list_head *head = &fs_info->space_info; struct btrfs_space_info *found; u64 total_used = 0; u64 total_free_data = 0; @@ -2035,7 +2034,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) int mixed = 0; rcu_read_lock(); - list_for_each_entry_rcu(found, head, list) { + list_for_each_entry_rcu(found, &fs_info->space_info, list) { if (found->flags & BTRFS_BLOCK_GROUP_DATA) { int i; -- cgit v1.2.3-59-g8ed1b From bf2df5aed1c881f79b001ee19642e386bcda6d80 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 25 Oct 2019 10:53:12 +0100 Subject: Btrfs: remove wait queue from space_info structure It is not used anymore since commit 957780eb2788d8 ("Btrfs: introduce ticketed enospc infrastructure"), so just remove it. 
Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 1 - fs/btrfs/space-info.h | 1 - 2 files changed, 2 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 3a2b0d1155a8..22b4968699e1 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -58,7 +58,6 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags) spin_lock_init(&space_info->lock); space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; - init_waitqueue_head(&space_info->wait); INIT_LIST_HEAD(&space_info->ro_bgs); INIT_LIST_HEAD(&space_info->tickets); INIT_LIST_HEAD(&space_info->priority_tickets); diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index 2d8c811a9792..1a349e3f9cc1 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -63,7 +63,6 @@ struct btrfs_space_info { struct rw_semaphore groups_sem; /* for block groups in our same type */ struct list_head block_groups[BTRFS_NR_RAID_TYPES]; - wait_queue_head_t wait; struct kobject kobj; struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES]; -- cgit v1.2.3-59-g8ed1b From 16ad3be1752a7b8ea18f0f97e8383fec0e470e7c Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 25 Oct 2019 10:52:42 +0100 Subject: Btrfs: remove unnecessary delalloc mutex for inodes The inode delalloc mutex was added a long time ago by commit f248679e86fea ("Btrfs: add a delalloc mutex to inodes for delalloc reservations"), and the reason for its introduction is not very clear from the change log. It claims it solves bogus warnings from lockdep, however it lacks an example report/warning from lockdep, or any explanation. Since we have enough concurrentcy protection from the locks of the space info and block reserve objects, and such lockdep warnings don't seem to exist anymore (at least on a 5.3 kernel I couldn't get them with fstests, ltp, fs_mark, etc), remove it, simplifying things a bit and decreasing the size of the btrfs_inode structure. With some quick fio tests doing direct IO and mmap writes I couldn't observe any significant performance increase either (direct IO writes that don't increase the file's size don't hold the inode's lock for their entire duration and mmap writes don't hold the inode's lock at all), which are the only type of writes that could see any performance gain due to less serialization. Review feedback from Josef: The problem was taking the i_mutex in mmap, which is how I was protecting delalloc reservations originally. The delalloc mutex didn't come with all of the other dependencies. That's what the lockdep messages were about, removing the lock isn't going to make them appear again. We _had_ to lock around this because we used to do tricks to keep from over-reserving, and if we didn't serialize delalloc reservations we'd end up with ugly accounting problems when we tried to clean things up. However with my recentish changes this isn't the case anymore. Every operation is responsible for reserving its space, and then adding it to the inode. Then cleaning up is straightforward and can't be mucked up by other users. So we no longer need the delalloc mutex to safe us from ourselves. 
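As an illustration of the end result, the reservation path in btrfs_delalloc_reserve_metadata() collapses to roughly the shape below. This is a condensed sketch of the flow visible in the diff that follows, not the verbatim kernel code; the calculation of meta_reserve/qgroup_reserve and the flush-mode selection are elided:

	ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true);
	if (ret)
		return ret;

	ret = btrfs_reserve_metadata_bytes(root, block_rsv, meta_reserve, flush);
	if (ret) {
		/* Undo only our own qgroup reservation; nothing else to unwind. */
		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
		return ret;
	}
	/* ... the reservation is then attributed to the inode under inode->lock ... */

Each caller reserves and, on failure, cleans up only its own space, so no cross-caller serialization (the old delalloc_mutex) is needed.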
Reviewed-by: Josef Bacik Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/btrfs_inode.h | 3 --- fs/btrfs/delalloc-space.c | 21 +++++---------------- fs/btrfs/inode.c | 1 - 3 files changed, 5 insertions(+), 20 deletions(-) diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index f853835c409c..4e12a477d32e 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -63,9 +63,6 @@ struct btrfs_inode { /* held while logging the inode in tree-log.c */ struct mutex log_mutex; - /* held while doing delalloc reservations */ - struct mutex delalloc_mutex; - /* used to order data wrt metadata */ struct btrfs_ordered_inode_tree ordered_tree; diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c index db9f2c58eb4a..4cdac4d834f5 100644 --- a/fs/btrfs/delalloc-space.c +++ b/fs/btrfs/delalloc-space.c @@ -307,7 +307,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) unsigned nr_extents; enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; int ret = 0; - bool delalloc_lock = true; /* * If we are a free space inode we need to not flush since we will be in @@ -320,7 +319,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) */ if (btrfs_is_free_space_inode(inode)) { flush = BTRFS_RESERVE_NO_FLUSH; - delalloc_lock = false; } else { if (current->journal_info) flush = BTRFS_RESERVE_FLUSH_LIMIT; @@ -329,9 +327,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) schedule_timeout(1); } - if (delalloc_lock) - mutex_lock(&inode->delalloc_mutex); - num_bytes = ALIGN(num_bytes, fs_info->sectorsize); /* @@ -348,10 +343,12 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) &qgroup_reserve); ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true); if (ret) - goto out_fail; + return ret; ret = btrfs_reserve_metadata_bytes(root, block_rsv, meta_reserve, flush); - if (ret) - goto out_qgroup; + if (ret) { + btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve); + return ret; + } /* * Now we need to update our outstanding extents and csum bytes _first_ @@ -375,15 +372,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) block_rsv->qgroup_rsv_reserved += qgroup_reserve; spin_unlock(&block_rsv->lock); - if (delalloc_lock) - mutex_unlock(&inode->delalloc_mutex); return 0; -out_qgroup: - btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve); -out_fail: - if (delalloc_lock) - mutex_unlock(&inode->delalloc_mutex); - return ret; } /** diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5edc6a27ea4a..4c074f4bccbb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -9356,7 +9356,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->io_failure_tree.track_uptodate = true; atomic_set(&ei->sync_writers, 0); mutex_init(&ei->log_mutex); - mutex_init(&ei->delalloc_mutex); btrfs_ordered_inode_tree_init(&ei->ordered_tree); INIT_LIST_HEAD(&ei->delalloc_inodes); INIT_LIST_HEAD(&ei->delayed_iput); -- cgit v1.2.3-59-g8ed1b From 01f0f9da9dc8b4d60921af92bc9329a1eb2fa8e0 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 15 Oct 2019 18:42:17 +0300 Subject: btrfs: Cleanup and simplify find_newest_super_backup Backup roots are always written in a circular manner. By definition we can only ever have 1 backup root whose generation equals to that of the superblock. Hence, the 'if' in the for loop will trigger at most once. This is sufficient to return the newest backup root. 
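Put differently, the lookup collapses to a single scan with no wraparound handling; a condensed sketch of the resulting loop (the actual diff is below):

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		/* At most one backup can match the superblock generation. */
		if (btrfs_backup_tree_root_gen(root_backup) == newest_gen)
			return i;
	}
	return -EINVAL;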
Furthermore the newest_gen parameter is always set to the generation of the superblock. This value can be obtained from the fs_info. This patch removes the unnecessary code dealing with the wraparound case and makes 'newest_gen' a local variable. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7c345c4bc817..9d24bd3de595 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1790,18 +1790,18 @@ sleep: } /* - * this will find the highest generation in the array of - * root backups. The index of the highest array is returned, - * or -1 if we can't find anything. + * This will find the highest generation in the array of root backups. The + * index of the highest array is returned, or -EINVAL if we can't find + * anything. * * We check to make sure the array is valid by comparing the * generation of the latest root in the array with the generation * in the super block. If they don't match we pitch it. */ -static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) +static int find_newest_super_backup(struct btrfs_fs_info *info) { + const u64 newest_gen = btrfs_super_generation(info->super_copy); u64 cur; - int newest_index = -1; struct btrfs_root_backup *root_backup; int i; @@ -1809,17 +1809,10 @@ static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) root_backup = info->super_copy->super_roots + i; cur = btrfs_backup_tree_root_gen(root_backup); if (cur == newest_gen) - newest_index = i; + return i; } - /* check to see if we actually wrapped around */ - if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) { - root_backup = info->super_copy->super_roots; - cur = btrfs_backup_tree_root_gen(root_backup); - if (cur == newest_gen) - newest_index = 0; - } - return newest_index; + return -EINVAL; } @@ -1831,11 +1824,11 @@ static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) static void find_oldest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) { - int newest_index = -1; + int newest_index; - newest_index = find_newest_super_backup(info, newest_gen); + newest_index = find_newest_super_backup(info); /* if there was garbage in there, just move along */ - if (newest_index == -1) { + if (newest_index == -EINVAL) { info->backup_root_index = 0; } else { info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; @@ -1952,9 +1945,7 @@ static noinline int next_root_backup(struct btrfs_fs_info *info, int newest = *backup_index; if (*num_backups_tried == 0) { - u64 gen = btrfs_super_generation(super); - - newest = find_newest_super_backup(info, gen); + newest = find_newest_super_backup(info); if (newest == -1) return -1; -- cgit v1.2.3-59-g8ed1b From fc2e4c5b3508f4d0ab981be9aa55bfb80bf971d1 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 15 Oct 2019 18:42:18 +0300 Subject: btrfs: Remove newest_gen argument from find_oldest_super_backup It's no longer needed following cleanups around find_newest_backup_root Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9d24bd3de595..1834ff4fabee 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1821,8 +1821,7 @@ static int find_newest_super_backup(struct btrfs_fs_info *info) * in the 
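For illustration, the simplified lookup after these cleanups can be modelled by a small standalone function (plain arrays instead of the real superblock structures; names are made up for the example):

	#include <errno.h>

	#define NUM_BACKUP_ROOTS 4

	/*
	 * Return the slot whose generation matches the superblock generation,
	 * or -EINVAL if none does. Backup roots are written in a ring, so at
	 * most one slot can match and no wraparound handling is needed.
	 */
	static int find_newest_backup_slot(const unsigned long long gen[NUM_BACKUP_ROOTS],
					   unsigned long long newest_gen)
	{
		int i;

		for (i = 0; i < NUM_BACKUP_ROOTS; i++) {
			if (gen[i] == newest_gen)
				return i;
		}
		return -EINVAL;
	}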
backup array. This will set the backup_root_index * field in the fs_info struct */ -static void find_oldest_super_backup(struct btrfs_fs_info *info, - u64 newest_gen) +static void find_oldest_super_backup(struct btrfs_fs_info *info) { int newest_index; @@ -2860,8 +2859,7 @@ int __cold open_ctree(struct super_block *sb, * run through our array of backup supers and setup * our ring pointer to the oldest one */ - generation = btrfs_super_generation(disk_super); - find_oldest_super_backup(fs_info, generation); + find_oldest_super_backup(fs_info); /* * In the long term, we'll store the compression type in the super -- cgit v1.2.3-59-g8ed1b From bd2336b2ac119e65457261528883d9623d41315f Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 15 Oct 2019 18:42:19 +0300 Subject: btrfs: Add read_backup_root This function will replace next_root_backup with a much saner/cleaner interface. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 1834ff4fabee..4d061facb416 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1928,6 +1928,50 @@ static void backup_super_roots(struct btrfs_fs_info *info) sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); } +/* + * read_backup_root - Reads a backup root based on the passed priority. Prio 0 + * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots + * + * fs_info - filesystem whose backup roots need to be read + * priority - priority of backup root required + * + * Returns backup root index on success and -EINVAL otherwise. + */ +static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority) +{ + int backup_index = find_newest_super_backup(fs_info); + struct btrfs_super_block *super = fs_info->super_copy; + struct btrfs_root_backup *root_backup; + + if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) { + if (priority == 0) + return backup_index; + + backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority; + backup_index %= BTRFS_NUM_BACKUP_ROOTS; + } else { + return -EINVAL; + } + + root_backup = super->super_roots + backup_index; + + btrfs_set_super_generation(super, + btrfs_backup_tree_root_gen(root_backup)); + btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); + btrfs_set_super_root_level(super, + btrfs_backup_tree_root_level(root_backup)); + btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); + + /* + * Fixme: the total bytes and num_devices need to match or we should + * need a fsck + */ + btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); + btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); + + return backup_index; +} + /* * this copies info out of the root backup array and back into * the in-memory super block. It is meant to help iterate through -- cgit v1.2.3-59-g8ed1b From b8522a1e5f421e003a8d2a8937eb3a095ce33f5d Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 15 Oct 2019 18:42:20 +0300 Subject: btrfs: Factor out tree roots initialization during mount The code responsible for reading and initializing tree roots is scattered in open_ctree among 2 labels, emulating a loop. This is rather confusing to reason about. Instead, factor the code to a new function, init_tree_roots which implements the same logical flow. 
There are a couple of notable differences, namely: * Instead of using next_backup_root it's using the newly introduced read_backup_root. * If read_backup_root returns an error init_tree_roots propagates the error and there is no special handling of that case e.g. the code jumps straight to 'fail_tree_roots' label. The old code, however, was (erroneously) jumping to 'fail_block_groups' label if next_backup_root did fail, this was unnecessary since the tree roots init logic doesn't modify the state of block groups. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 142 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 85 insertions(+), 57 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4d061facb416..1b1f051ef33f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2610,6 +2610,89 @@ out: return ret; } +int __cold init_tree_roots(struct btrfs_fs_info *fs_info) +{ + struct btrfs_super_block *sb = fs_info->super_copy; + struct btrfs_root *tree_root = fs_info->tree_root; + bool handle_error = false; + int ret = 0; + int i; + + for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { + u64 generation; + int level; + + if (handle_error) { + if (!IS_ERR(tree_root->node)) + free_extent_buffer(tree_root->node); + tree_root->node = NULL; + + if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) + break; + + free_root_pointers(fs_info, 0); + + /* + * Don't use the log in recovery mode, it won't be + * valid + */ + btrfs_set_super_log_root(sb, 0); + + /* We can't trust the free space cache either */ + btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); + + ret = read_backup_root(fs_info, i); + if (ret < 0) + return ret; + } + generation = btrfs_super_generation(sb); + level = btrfs_super_root_level(sb); + tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb), + generation, level, NULL); + if (IS_ERR(tree_root->node) || + !extent_buffer_uptodate(tree_root->node)) { + handle_error = true; + + if (IS_ERR(tree_root->node)) + ret = PTR_ERR(tree_root->node); + else if (!extent_buffer_uptodate(tree_root->node)) + ret = -EUCLEAN; + + btrfs_warn(fs_info, "failed to read tree root"); + continue; + } + + btrfs_set_root_node(&tree_root->root_item, tree_root->node); + tree_root->commit_root = btrfs_root_node(tree_root); + btrfs_set_root_refs(&tree_root->root_item, 1); + + mutex_lock(&tree_root->objectid_mutex); + ret = btrfs_find_highest_objectid(tree_root, + &tree_root->highest_objectid); + if (ret < 0) { + mutex_unlock(&tree_root->objectid_mutex); + handle_error = true; + continue; + } + + ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); + mutex_unlock(&tree_root->objectid_mutex); + + ret = btrfs_read_roots(fs_info); + if (ret < 0) { + handle_error = true; + continue; + } + + /* All successful */ + fs_info->generation = generation; + fs_info->last_trans_committed = generation; + break; + } + + return ret; +} + int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, char *options) @@ -2628,8 +2711,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_root *chunk_root; int ret; int err = -EINVAL; - int num_backups_tried = 0; - int backup_index = 0; int clear_free_space_tree = 0; int level; @@ -3051,44 +3132,9 @@ int __cold open_ctree(struct super_block *sb, goto fail_tree_roots; } -retry_root_backup: - generation = btrfs_super_generation(disk_super); - level = btrfs_super_root_level(disk_super); - - tree_root->node = read_tree_block(fs_info, - 
btrfs_super_root(disk_super), - generation, level, NULL); - if (IS_ERR(tree_root->node) || - !extent_buffer_uptodate(tree_root->node)) { - btrfs_warn(fs_info, "failed to read tree root"); - if (!IS_ERR(tree_root->node)) - free_extent_buffer(tree_root->node); - tree_root->node = NULL; - goto recovery_tree_root; - } - - btrfs_set_root_node(&tree_root->root_item, tree_root->node); - tree_root->commit_root = btrfs_root_node(tree_root); - btrfs_set_root_refs(&tree_root->root_item, 1); - - mutex_lock(&tree_root->objectid_mutex); - ret = btrfs_find_highest_objectid(tree_root, - &tree_root->highest_objectid); - if (ret) { - mutex_unlock(&tree_root->objectid_mutex); - goto recovery_tree_root; - } - - ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); - - mutex_unlock(&tree_root->objectid_mutex); - - ret = btrfs_read_roots(fs_info); + ret = init_tree_roots(fs_info); if (ret) - goto recovery_tree_root; - - fs_info->generation = generation; - fs_info->last_trans_committed = generation; + goto fail_tree_roots; ret = btrfs_verify_dev_extents(fs_info); if (ret) { @@ -3383,24 +3429,6 @@ fail: btrfs_free_stripe_hash_table(fs_info); btrfs_close_devices(fs_info->fs_devices); return err; - -recovery_tree_root: - if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) - goto fail_tree_roots; - - free_root_pointers(fs_info, false); - - /* don't use the log in recovery mode, it won't be valid */ - btrfs_set_super_log_root(disk_super, 0); - - /* we can't trust the free space cache either */ - btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); - - ret = next_root_backup(fs_info, fs_info->super_copy, - &num_backups_tried, &backup_index); - if (ret == -1) - goto fail_block_groups; - goto retry_root_backup; } ALLOW_ERROR_INJECTION(open_ctree, ERRNO); -- cgit v1.2.3-59-g8ed1b From 336a0d8df1d9ec2480dc1790f0d7801c32318d1c Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 15 Oct 2019 18:42:21 +0300 Subject: btrfs: Don't use objectid_mutex during mount Since the filesystem is not well formed and no trees are loaded it's pointless holding the objectid_mutex. Just remove its usage. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 1b1f051ef33f..b23c9fbc2e89 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2666,17 +2666,18 @@ int __cold init_tree_roots(struct btrfs_fs_info *fs_info) tree_root->commit_root = btrfs_root_node(tree_root); btrfs_set_root_refs(&tree_root->root_item, 1); - mutex_lock(&tree_root->objectid_mutex); + /* + * No need to hold btrfs_root::objectid_mutex since the fs + * hasn't been fully initialised and we are the only user + */ ret = btrfs_find_highest_objectid(tree_root, &tree_root->highest_objectid); if (ret < 0) { - mutex_unlock(&tree_root->objectid_mutex); handle_error = true; continue; } ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); - mutex_unlock(&tree_root->objectid_mutex); ret = btrfs_read_roots(fs_info); if (ret < 0) { -- cgit v1.2.3-59-g8ed1b From 260eb11bd42b4b5bcd671a1f8cb7d7b9a3e36b8d Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 15 Oct 2019 18:42:22 +0300 Subject: btrfs: Remove unused next_root_backup function This function has been superseded by previous commits and is no longer used so just remove it. 
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 50 -------------------------------------------------- 1 file changed, 50 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b23c9fbc2e89..99ba3278628c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1972,56 +1972,6 @@ static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority) return backup_index; } -/* - * this copies info out of the root backup array and back into - * the in-memory super block. It is meant to help iterate through - * the array, so you send it the number of backups you've already - * tried and the last backup index you used. - * - * this returns -1 when it has tried all the backups - */ -static noinline int next_root_backup(struct btrfs_fs_info *info, - struct btrfs_super_block *super, - int *num_backups_tried, int *backup_index) -{ - struct btrfs_root_backup *root_backup; - int newest = *backup_index; - - if (*num_backups_tried == 0) { - newest = find_newest_super_backup(info); - if (newest == -1) - return -1; - - *backup_index = newest; - *num_backups_tried = 1; - } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { - /* we've tried all the backups, all done */ - return -1; - } else { - /* jump to the next oldest backup */ - newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % - BTRFS_NUM_BACKUP_ROOTS; - *backup_index = newest; - *num_backups_tried += 1; - } - root_backup = super->super_roots + newest; - - btrfs_set_super_generation(super, - btrfs_backup_tree_root_gen(root_backup)); - btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); - btrfs_set_super_root_level(super, - btrfs_backup_tree_root_level(root_backup)); - btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); - - /* - * fixme: the total bytes and num_devices need to match or we should - * need a fsck - */ - btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); - btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); - return 0; -} - /* helper to cleanup workers */ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) { -- cgit v1.2.3-59-g8ed1b From 4ac039ad75df3f5f5f5b2b7439f094ad5bd9dbbe Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 15 Oct 2019 18:42:23 +0300 Subject: btrfs: Rename find_oldest_super_backup to init_backup_root_slot The old name was an awful misnomer because it didn't really find the oldest super backup per se but rather its slot. For example if we have: slot0: gen - 2 slot1: gen - 1 slot2: gen slot3: empty init_backup_root_slot will return slot3 and not slot0. The new name is more appropriate since the function doesn't care whether there is a valid backup in the returned slot or not. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 99ba3278628c..6c44d1781f21 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1815,13 +1815,11 @@ static int find_newest_super_backup(struct btrfs_fs_info *info) return -EINVAL; } - /* - * find the oldest backup so we know where to store new entries - * in the backup array.
This will set the backup_root_index - * field in the fs_info struct + * Initialize backup_root_index with the next available slot, where subsequent + * transaction commit will store the backup root */ -static void find_oldest_super_backup(struct btrfs_fs_info *info) +static void init_backup_root_slot(struct btrfs_fs_info *info) { int newest_index; @@ -2935,7 +2933,7 @@ int __cold open_ctree(struct super_block *sb, * run through our array of backup supers and setup * our ring pointer to the oldest one */ - find_oldest_super_backup(fs_info); + init_backup_root_slot(fs_info); /* * In the long term, we'll store the compression type in the super -- cgit v1.2.3-59-g8ed1b From 6ef108dd0cda65d3031166222e64cdf71fdd3359 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 15 Oct 2019 18:42:24 +0300 Subject: btrfs: Streamline btrfs_fs_info::backup_root_index semantics The backup_root_index member stores the index at which the backup root should be saved upon next transaction commit. However, there is a small deviation from this behavior in the form of a check in backup_super_roots which checks if current root generation equals to the generation of the previous root. This can trigger in the following scenario: slot0: gen-2 slot1: gen-1 slot2: gen slot3: unused Now suppose slot3 (which is also the root specified in the super block) is corrupted hence init_tree_roots chooses to use the backup root at slot2, meaning read_backup_root will read slot2 and assign the superblock generation to gen-1. Despite this backup_root_index will point at slot3 because its init happens in init_backup_root_slot, long before any parsing of the backup roots occur. Then on next transaction start, gen-1 will be incremented by 1 making the root's generation equal gen. Subsequently, on transaction commit the following check triggers: if (btrfs_backup_tree_root_gen(root_backup) == btrfs_header_generation(info->tree_root->node)) This causes the 'next_backup', which is the index at which the backup is going to be written to, to set to last_backup, which will be slot2. All of this is a very confusing way of expressing the following invariant: Always write a backup root at the index following the last used backup root. This commit streamlines this logic by setting backup_root_index to the next index after the one used for mount. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 51 ++++++++++++--------------------------------------- 1 file changed, 12 insertions(+), 39 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6c44d1781f21..ddb39f76f71a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1815,23 +1815,6 @@ static int find_newest_super_backup(struct btrfs_fs_info *info) return -EINVAL; } -/* - * Initialize backup_root_index with the next available slot, where subsequent - * transaction commit will store the backup root - */ -static void init_backup_root_slot(struct btrfs_fs_info *info) -{ - int newest_index; - - newest_index = find_newest_super_backup(info); - /* if there was garbage in there, just move along */ - if (newest_index == -EINVAL) { - info->backup_root_index = 0; - } else { - info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; - } -} - /* * copy all the root pointers into the super backup array. 
* this will bump the backup pointer by one when it is @@ -1839,22 +1822,8 @@ static void init_backup_root_slot(struct btrfs_fs_info *info) */ static void backup_super_roots(struct btrfs_fs_info *info) { - int next_backup; + const int next_backup = info->backup_root_index; struct btrfs_root_backup *root_backup; - int last_backup; - - next_backup = info->backup_root_index; - last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) % - BTRFS_NUM_BACKUP_ROOTS; - - /* - * just overwrite the last backup if we're at the same generation - * this happens only at umount - */ - root_backup = info->super_for_commit->super_roots + last_backup; - if (btrfs_backup_tree_root_gen(root_backup) == - btrfs_header_generation(info->tree_root->node)) - next_backup = last_backup; root_backup = info->super_for_commit->super_roots + next_backup; @@ -2558,8 +2527,9 @@ out: return ret; } -int __cold init_tree_roots(struct btrfs_fs_info *fs_info) +static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) { + int backup_index = find_newest_super_backup(fs_info); struct btrfs_super_block *sb = fs_info->super_copy; struct btrfs_root *tree_root = fs_info->tree_root; bool handle_error = false; @@ -2590,6 +2560,7 @@ int __cold init_tree_roots(struct btrfs_fs_info *fs_info) btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); ret = read_backup_root(fs_info, i); + backup_index = ret; if (ret < 0) return ret; } @@ -2636,6 +2607,14 @@ int __cold init_tree_roots(struct btrfs_fs_info *fs_info) /* All successful */ fs_info->generation = generation; fs_info->last_trans_committed = generation; + + /* Always begin writing backup roots after the one being used */ + if (backup_index < 0) { + fs_info->backup_root_index = 0; + } else { + fs_info->backup_root_index = backup_index + 1; + fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS; + } break; } @@ -2929,12 +2908,6 @@ int __cold open_ctree(struct super_block *sb, if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); - /* - * run through our array of backup supers and setup - * our ring pointer to the oldest one - */ - init_backup_root_slot(fs_info); - /* * In the long term, we'll store the compression type in the super * block, and it'll be used for per file compression control. -- cgit v1.2.3-59-g8ed1b From 3ec17a67cc340b7b0353d7ac6d2746dcf8983b43 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 31 Oct 2019 13:55:01 +0300 Subject: btrfs: clean up locking name in scrub_enumerate_chunks() The "&fs_info->dev_replace.rwsem" and "&dev_replace->rwsem" refer to the same lock but Smatch is not clever enough to figure that out so it leads to static checker warnings. It's better to use it consistently anyway. 
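The fix is simply to pick one spelling for the lock and use it everywhere, e.g. (a sketch of the pattern applied by the diff below):

	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_write(&dev_replace->rwsem);	/* same rwsem, one consistent expression */
	dev_replace->cursor_left = dev_replace->cursor_right;
	dev_replace->item_needs_writeback = 1;
	up_write(&dev_replace->rwsem);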
Signed-off-by: Dan Carpenter Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/scrub.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 4a5a4e4ef707..06494304ab80 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3620,7 +3620,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, break; } - down_write(&fs_info->dev_replace.rwsem); + down_write(&dev_replace->rwsem); dev_replace->cursor_right = found_key.offset + length; dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; @@ -3661,10 +3661,10 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, scrub_pause_off(fs_info); - down_write(&fs_info->dev_replace.rwsem); + down_write(&dev_replace->rwsem); dev_replace->cursor_left = dev_replace->cursor_right; dev_replace->item_needs_writeback = 1; - up_write(&fs_info->dev_replace.rwsem); + up_write(&dev_replace->rwsem); if (ro_set) btrfs_dec_block_group_ro(cache); -- cgit v1.2.3-59-g8ed1b From 153a6d299956983de890ffaf429e5904503eb94c Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 3 Sep 2019 07:46:19 +0800 Subject: btrfs: tree-checker: Check item size before reading file extent type In check_extent_data_item(), we read file extent type without verifying if the item size is valid. Add such a check to ensure the file extent type we read is correct. The check is not completely accurate, as it needs to cover both inline and regular extents, so it only checks whether the item size is larger than or equal to the inline header. So the existing size checks on inline/regular extents are still needed. Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 6638483e8ad8..493d4d9e0f79 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -221,6 +221,17 @@ static int check_extent_data_item(struct extent_buffer *leaf, fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + /* + * Make sure the item contains at least inline header, so the file + * extent type is not some garbage. + */ + if (item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START) { + file_extent_err(leaf, slot, + "invalid item size, have %u expect [%lu, %u)", + item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START, + SZ_4K); + return -EUCLEAN; + } if (btrfs_file_extent_type(leaf, fi) >= BTRFS_NR_FILE_EXTENT_TYPES) { file_extent_err(leaf, slot, "invalid type for file extent, have %u expect range [0, %u]", -- cgit v1.2.3-59-g8ed1b From 2d974619a77f106f3d1341686dea95c0eaad601f Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Wed, 23 Oct 2019 21:57:26 +0800 Subject: btrfs: volumes: Use more straightforward way to calculate map length The old code goes: offset = logical - em->start; length = min_t(u64, em->len - offset, length); Since the @length calculation depends on offset, it can take a reader several more seconds to see that it is just the same code as: offset = logical - em->start; length = min_t(u64, em->start + em->len - logical, length); Use the above code to make the length calculation independent from the other variable, thus slightly increasing the readability.
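The equivalence is easy to check: with offset = logical - em->start, we have em->len - offset = em->start + em->len - logical. A tiny self-contained check with arbitrary example values:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t em_start = 1024 * 1024;	/* extent map start */
		uint64_t em_len = 8 * 1024 * 1024;	/* extent map length */
		uint64_t logical = 3 * 1024 * 1024;	/* somewhere inside the map */
		uint64_t offset = logical - em_start;

		assert(em_len - offset == em_start + em_len - logical);
		return 0;
	}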
Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f534a6a5553e..6a0288d17b7d 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -5412,7 +5412,7 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, } offset = logical - em->start; - length = min_t(u64, em->len - offset, length); + length = min_t(u64, em->start + em->len - logical, length); stripe_len = map->stripe_len; /* -- cgit v1.2.3-59-g8ed1b From 6b7faadd985c990324b5b5bd18cc4ba5c395eb65 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Wed, 23 Oct 2019 21:57:27 +0800 Subject: btrfs: Ensure we trim ranges across block group boundary [BUG] When deleting large files (which cross block group boundary) with discard mount option, we find some btrfs_discard_extent() calls only trimmed part of its space, not the whole range: btrfs_discard_extent: type=0x1 start=19626196992 len=2144530432 trimmed=1073741824 ratio=50% type: bbio->map_type, in above case, it's SINGLE DATA. start: Logical address of this trim len: Logical length of this trim trimmed: Physically trimmed bytes ratio: trimmed / len Thus leaving some unused space not discarded. [CAUSE] When discard mount option is specified, after a transaction is fully committed (super block written to disk), we begin to cleanup pinned extents in the following call chain: btrfs_commit_transaction() |- btrfs_finish_extent_commit() |- find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY); |- btrfs_discard_extent() However, pinned extents are recorded in an extent_io_tree, which can merge adjacent extent states. When a large file gets deleted and it has adjacent file extents across block group boundary, we will get a large merged range like this: |<--- BG1 --->|<--- BG2 --->| |//////|<-- Range to discard --->|/////| To discard that range, we have the following calls: btrfs_discard_extent() |- btrfs_map_block() | Returned bbio will end at BG1's end. As btrfs_map_block() | never returns result across block group boundary. |- btrfs_issue_discard() Issue discard for each stripe. So we will only discard the range in BG1, not the remaining part in BG2. Furthermore, this bug is not that reliably observed, for above case, if there is no other extent in BG2, BG2 will be empty and btrfs will trim all space of BG2, covering up the bug. [FIX] - Allow __btrfs_map_block_for_discard() to modify @length parameter btrfs_map_block() uses its @length parameter to notify the caller how many bytes are mapped in current call. With __btrfs_map_block_for_discard() also modifying the @length, btrfs_discard_extent() now understands when to do extra trim. - Call btrfs_map_block() in a loop until we hit the range end Since we now know how many bytes are mapped each time, we can iterate through each block group boundary and issue correct trim for each range.
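In condensed form, the fixed loop in btrfs_discard_extent() (shown in full in the diff below) follows this shape:

	u64 cur = bytenr;
	u64 end = bytenr + num_bytes;

	while (cur < end) {
		u64 len = end - cur;	/* in: bytes still wanted, out: bytes mapped */

		ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur, &len, &bbio, 0);
		if (ret < 0)
			break;
		/* ... issue the discard for every stripe of this mapping ... */
		btrfs_put_bbio(bbio);
		cur += len;	/* a mapping never crosses a block group, so keep going */
	}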
Reviewed-by: Filipe Manana Reviewed-by: Nikolay Borisov Tested-by: Nikolay Borisov Reviewed-by: Josef Bacik Signed-off-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 41 +++++++++++++++++++++++++++++++---------- fs/btrfs/volumes.c | 6 ++++-- 2 files changed, 35 insertions(+), 12 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b24d116eaf0b..f9091bd41e99 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1306,8 +1306,10 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len, int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, u64 *actual_bytes) { - int ret; + int ret = 0; u64 discarded_bytes = 0; + u64 end = bytenr + num_bytes; + u64 cur = bytenr; struct btrfs_bio *bbio = NULL; @@ -1316,15 +1318,23 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, * associated to its stripes that don't go away while we are discarding. */ btrfs_bio_counter_inc_blocked(fs_info); - /* Tell the block device(s) that the sectors can be discarded */ - ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes, - &bbio, 0); - /* Error condition is -ENOMEM */ - if (!ret) { - struct btrfs_bio_stripe *stripe = bbio->stripes; + while (cur < end) { + struct btrfs_bio_stripe *stripe; int i; + num_bytes = end - cur; + /* Tell the block device(s) that the sectors can be discarded */ + ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur, + &num_bytes, &bbio, 0); + /* + * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or + * -EOPNOTSUPP. For any such error, @num_bytes is not updated, + * thus we can't continue anyway. + */ + if (ret < 0) + goto out; + stripe = bbio->stripes; for (i = 0; i < bbio->num_stripes; i++, stripe++) { u64 bytes; struct request_queue *req_q; @@ -1341,10 +1351,19 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, stripe->physical, stripe->length, &bytes); - if (!ret) + if (!ret) { discarded_bytes += bytes; - else if (ret != -EOPNOTSUPP) - break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */ + } else if (ret != -EOPNOTSUPP) { + /* + * Logic errors or -ENOMEM, or -EIO, but + * unlikely to happen. + * + * And since there are two loops, explicitly + * go to out to avoid confusion. + */ + btrfs_put_bbio(bbio); + goto out; + } /* * Just in case we get back EOPNOTSUPP for some reason, @@ -1354,7 +1373,9 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, ret = 0; } btrfs_put_bbio(bbio); + cur += num_bytes; } +out: btrfs_bio_counter_dec(fs_info); if (actual_bytes) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6a0288d17b7d..e6c458d99722 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -5374,12 +5374,13 @@ void btrfs_put_bbio(struct btrfs_bio *bbio) * replace. 
*/ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, - u64 logical, u64 length, + u64 logical, u64 *length_ret, struct btrfs_bio **bbio_ret) { struct extent_map *em; struct map_lookup *map; struct btrfs_bio *bbio; + u64 length = *length_ret; u64 offset; u64 stripe_nr; u64 stripe_nr_end; @@ -5413,6 +5414,7 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, offset = logical - em->start; length = min_t(u64, em->start + em->len - logical, length); + *length_ret = length; stripe_len = map->stripe_len; /* @@ -5827,7 +5829,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, if (op == BTRFS_MAP_DISCARD) return __btrfs_map_block_for_discard(fs_info, logical, - *length, bbio_ret); + length, bbio_ret); ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom); if (ret < 0) -- cgit v1.2.3-59-g8ed1b From 11f2069c113e02971b8db6fda62f9b9cd31a030f Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 30 Oct 2019 12:23:11 +0000 Subject: Btrfs: send, allow clone operations within the same file For send we currently skip clone operations when the source and destination files are the same. This is so because clone didn't support this case in its early days, but support for it was added back in May 2013 by commit a96fbc72884fcb ("Btrfs: allow file data clone within a file"). This change adds support for it. Example: $ mkfs.btrfs -f /dev/sdd $ mount /dev/sdd /mnt/sdd $ xfs_io -f -c "pwrite -S 0xab -b 64K 0 64K" /mnt/sdd/foobar $ xfs_io -c "reflink /mnt/sdd/foobar 0 64K 64K" /mnt/sdd/foobar $ btrfs subvolume snapshot -r /mnt/sdd /mnt/sdd/snap $ mkfs.btrfs -f /dev/sde $ mount /dev/sde /mnt/sde $ btrfs send /mnt/sdd/snap | btrfs receive /mnt/sde Without this change file foobar at the destination has a single 128Kb extent: $ filefrag -v /mnt/sde/snap/foobar Filesystem type is: 9123683e File size of /mnt/sde/snap/foobar is 131072 (32 blocks of 4096 bytes) ext: logical_offset: physical_offset: length: expected: flags: 0: 0.. 31: 0.. 31: 32: last,unknown_loc,delalloc,eof /mnt/sde/snap/foobar: 1 extent found With this we get a single 64Kb extent that is shared at file offsets 0 and 64K, just like in the source filesystem: $ filefrag -v /mnt/sde/snap/foobar Filesystem type is: 9123683e File size of /mnt/sde/snap/foobar is 131072 (32 blocks of 4096 bytes) ext: logical_offset: physical_offset: length: expected: flags: 0: 0.. 15: 3328.. 3343: 16: shared 1: 16.. 31: 3328.. 3343: 16: 3344: last,shared,eof /mnt/sde/snap/foobar: 2 extents found Reviewed-by: Josef Bacik Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/send.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 27e92594a81b..cbf4909f5cd9 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1248,12 +1248,20 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) */ if (found->root == bctx->sctx->send_root) { /* - * TODO for the moment we don't accept clones from the inode - * that is currently send. We may change this when - * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same - * file. + * If the source inode was not yet processed we can't issue a + * clone operation, as the source extent does not exist yet at + * the destination of the stream. 
*/ - if (ino >= bctx->cur_objectid) + if (ino > bctx->cur_objectid) return 0; + /* + * We clone from the inode currently being sent as long as the + * source extent is already processed, otherwise we could try + * to clone from an extent that does not exist yet at the + * destination of the stream. + */ + if (ino == bctx->cur_objectid && + offset >= bctx->sctx->cur_inode_next_write_offset) return 0; } -- cgit v1.2.3-59-g8ed1b From fd0ddbe2509568b00df364156f47561e9f469f15 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 30 Oct 2019 12:23:01 +0000 Subject: Btrfs: send, skip backreference walking for extents with many references Backreference walking, which is used by send to figure if it can issue clone operations instead of write operations, can be very slow and use too much memory when extents have many references. This change simply skips backreference walking when an extent has more than 64 references, in which case we fall back to a write operation instead of a clone operation. This limit is conservative and in practice I observed no significant slowdown with up to 100 references and still low memory usage up to that limit. This is a temporary workaround until there are speedups in the backref walking code, and as such it does not attempt to add extra interfaces or knobs to tweak the threshold. Reported-by: Atemu Link: https://lore.kernel.org/linux-btrfs/CAE4GHgkvqVADtS4AzcQJxo0Q1jKQgKaW3JGp3SGdoinVo=C9eQ@mail.gmail.com/T/#me55dc0987f9cc2acaa54372ce0492c65782be3fa CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Qu Wenruo Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/send.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index cbf4909f5cd9..ae2db5eb1549 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -24,6 +24,14 @@ #include "transaction.h" #include "compression.h" +/* + * Maximum number of references an extent can have in order for us to attempt to + * issue clone operations instead of write operations. This currently exists to + * avoid hitting limitations of the backreference walking code (taking a lot of + * time and using too much memory for extents with large number of references). + */ +#define SEND_MAX_EXTENT_REFS 64 + /* * A fs_path is a helper to dynamically build path names with unknown size. It reallocates the internal buffer on demand. @@ -1310,6 +1318,7 @@ static int find_extent_clone(struct send_ctx *sctx, struct clone_root *cur_clone_root; struct btrfs_key found_key; struct btrfs_path *tmp_path; + struct btrfs_extent_item *ei; int compressed; u32 i; @@ -1357,7 +1366,6 @@ static int find_extent_clone(struct send_ctx *sctx, ret = extent_from_logical(fs_info, disk_byte, tmp_path, &found_key, &flags); up_read(&fs_info->commit_root_sem); - btrfs_release_path(tmp_path); if (ret < 0) goto out; @@ -1366,6 +1374,21 @@ static int find_extent_clone(struct send_ctx *sctx, goto out; } + ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0], + struct btrfs_extent_item); + /* + * Backreference walking (iterate_extent_inodes() below) is currently + * too expensive when an extent has a large number of references, both + * in time spent and used memory. So for now just fallback to write + * operations instead of clone operations when an extent has more than + * a certain amount of references. + */ + if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) { + ret = -ENOENT; + goto out; + } + btrfs_release_path(tmp_path); + /* * Setup the clone roots.
*/ -- cgit v1.2.3-59-g8ed1b From 57e5ffeb8779a49202d48fb890f9d04fa0273c1e Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 29 Oct 2019 18:28:55 +0100 Subject: btrfs: sink write_flags to __extent_writepage_io __extent_writepage reads write flags from wbc and passes both to __extent_writepage_io. This makes write_flags redundant and we can remove it. Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a2f919087fa1..ceb3c028894e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3419,7 +3419,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, struct extent_page_data *epd, loff_t i_size, unsigned long nr_written, - unsigned int write_flags, int *nr_ret) + int *nr_ret) { struct extent_io_tree *tree = epd->tree; u64 start = page_offset(page); @@ -3435,6 +3435,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, size_t blocksize; int ret = 0; int nr = 0; + const unsigned int write_flags = wbc_to_write_flags(wbc); bool compressed; ret = btrfs_writepage_cow_fixup(page, start, page_end); @@ -3568,11 +3569,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, size_t pg_offset = 0; loff_t i_size = i_size_read(inode); unsigned long end_index = i_size >> PAGE_SHIFT; - unsigned int write_flags = 0; unsigned long nr_written = 0; - write_flags = wbc_to_write_flags(wbc); - trace___extent_writepage(page, inode, wbc); WARN_ON(!PageLocked(page)); @@ -3610,7 +3608,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, } ret = __extent_writepage_io(inode, page, wbc, epd, - i_size, nr_written, write_flags, &nr); + i_size, nr_written, &nr); if (ret == 1) goto done_unlocked; -- cgit v1.2.3-59-g8ed1b From fac07d2b091542f7169978e4c1413747d8d0e965 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 29 Oct 2019 18:28:57 +0100 Subject: btrfs: sink write flags to cow_file_range_async In commit "Btrfs: use REQ_CGROUP_PUNT for worker thread submitted bios", cow_file_range_async gained wbc as a parameter and this makes passing write flags redundant. Set it inside the function and remove the parameter. 
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/inode.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4c074f4bccbb..5b48830513e0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1214,8 +1214,7 @@ static int cow_file_range_async(struct inode *inode, struct writeback_control *wbc, struct page *locked_page, u64 start, u64 end, int *page_started, - unsigned long *nr_written, - unsigned int write_flags) + unsigned long *nr_written) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc); @@ -1227,6 +1226,7 @@ static int cow_file_range_async(struct inode *inode, int i; bool should_compress; unsigned nofs_flag; + const unsigned int write_flags = wbc_to_write_flags(wbc); unlock_extent(&BTRFS_I(inode)->io_tree, start, end); @@ -1737,7 +1737,6 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, { int ret; int force_cow = need_force_cow(inode, start, end); - unsigned int write_flags = wbc_to_write_flags(wbc); if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) { ret = run_delalloc_nocow(inode, locked_page, start, end, @@ -1753,8 +1752,7 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &BTRFS_I(inode)->runtime_flags); ret = cow_file_range_async(inode, wbc, locked_page, start, end, - page_started, nr_written, - write_flags); + page_started, nr_written); } if (ret) btrfs_cleanup_ordered_extents(inode, locked_page, start, -- cgit v1.2.3-59-g8ed1b From 47e6f7423b9196ad6832d26cae52b7015f81ee7f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Mar 2018 22:56:53 +0100 Subject: btrfs: add support for 3-copy replication (raid1c3) Add new block group profile to store 3 copies in a similar way that current RAID1 does. The profile attributes and constraints are defined in the raid table and used by the same code that already handles the 2-copy RAID1. The minimum number of devices is 3, the maximum number of devices/chunks that can be lost/damaged is 2. Like RAID6 but with 33% space utilization. Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 4 ++-- fs/btrfs/super.c | 2 ++ fs/btrfs/volumes.c | 19 +++++++++++++++++-- fs/btrfs/volumes.h | 2 ++ include/uapi/linux/btrfs.h | 3 ++- include/uapi/linux/btrfs_tree.h | 6 +++++- 6 files changed, 30 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 1c8f01eaf27c..aa1b437fb951 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -57,9 +57,9 @@ struct btrfs_ref; * filesystem data as well that can be used to read data in order to repair * read errors on other disks. * - * Current value is derived from RAID1 with 2 copies. + * Current value is derived from RAID1C3 with 3 copies.
*/ -#define BTRFS_MAX_MIRRORS (2 + 1) +#define BTRFS_MAX_MIRRORS (3 + 1) #define BTRFS_MAX_LEVEL 8 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 3f49407cc2aa..a5aff138e2e0 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1935,6 +1935,8 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, num_stripes = nr_devices; else if (type & BTRFS_BLOCK_GROUP_RAID1) num_stripes = 2; + else if (type & BTRFS_BLOCK_GROUP_RAID1C3) + num_stripes = 3; else if (type & BTRFS_BLOCK_GROUP_RAID10) num_stripes = 4; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e6c458d99722..d17835fbd52f 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -58,6 +58,18 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { .bg_flag = BTRFS_BLOCK_GROUP_RAID1, .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, }, + [BTRFS_RAID_RAID1C3] = { + .sub_stripes = 1, + .dev_stripes = 1, + .devs_max = 0, + .devs_min = 3, + .tolerated_failures = 2, + .devs_increment = 3, + .ncopies = 3, + .raid_name = "raid1c3", + .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3, + .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, + }, [BTRFS_RAID_DUP] = { .sub_stripes = 1, .dev_stripes = 2, @@ -4839,8 +4851,11 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, sort(devices_info, ndevs, sizeof(struct btrfs_device_info), btrfs_cmp_device_info, NULL); - /* round down to number of usable stripes */ - ndevs = round_down(ndevs, devs_increment); + /* + * Round down to number of usable stripes, devs_increment can be any + * number so we can't use round_down() + */ + ndevs -= ndevs % devs_increment; if (ndevs < devs_min) { ret = -ENOSPC; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index ac4ba8c57283..a4e26b84e1b9 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -545,6 +545,8 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags) return BTRFS_RAID_RAID10; else if (flags & BTRFS_BLOCK_GROUP_RAID1) return BTRFS_RAID_RAID1; + else if (flags & BTRFS_BLOCK_GROUP_RAID1C3) + return BTRFS_RAID_RAID1C3; else if (flags & BTRFS_BLOCK_GROUP_DUP) return BTRFS_RAID_DUP; else if (flags & BTRFS_BLOCK_GROUP_RAID0) diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 3ee0678c0a83..ba22f91a3f5b 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -831,7 +831,8 @@ enum btrfs_err_code { BTRFS_ERROR_DEV_TGT_REPLACE, BTRFS_ERROR_DEV_MISSING_NOT_FOUND, BTRFS_ERROR_DEV_ONLY_WRITABLE, - BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS + BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS, + BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, }; #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 5160be1d7332..52b2964b0311 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -841,6 +841,7 @@ struct btrfs_dev_replace_item { #define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6) #define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7) #define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8) +#define BTRFS_BLOCK_GROUP_RAID1C3 (1ULL << 9) #define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \ BTRFS_SPACE_INFO_GLOBAL_RSV) @@ -852,6 +853,7 @@ enum btrfs_raid_types { BTRFS_RAID_SINGLE, BTRFS_RAID_RAID5, BTRFS_RAID_RAID6, + BTRFS_RAID_RAID1C3, BTRFS_NR_RAID_TYPES }; @@ -861,6 +863,7 @@ enum btrfs_raid_types { #define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ BTRFS_BLOCK_GROUP_RAID1 | \ + BTRFS_BLOCK_GROUP_RAID1C3 | \ BTRFS_BLOCK_GROUP_RAID5 | \ 
BTRFS_BLOCK_GROUP_RAID6 | \ BTRFS_BLOCK_GROUP_DUP | \ @@ -868,7 +871,8 @@ enum btrfs_raid_types { BTRFS_BLOCK_GROUP_RAID6) -#define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1) +#define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1 | \ + BTRFS_BLOCK_GROUP_RAID1C3) /* * We need a bit for restriper to be able to tell when chunks of type -- cgit v1.2.3-59-g8ed1b From 8d6fac0087e538173f34ca7431ed9b58581acf28 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Mar 2018 22:56:53 +0100 Subject: btrfs: add support for 4-copy replication (raid1c4) Add new block group profile to store 4 copies in a similar way that current RAID1 does. The profile attributes and constraints are defined in the raid table and used by the same code that already handles the 2- and 3-copy RAID1. The minimum number of devices is 4, the maximum number of devices/chunks that can be lost/damaged is 3. There is no comparable traditional RAID level, the profile is added for future needs to accompany triple-parity and beyond. Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 4 ++-- fs/btrfs/super.c | 2 ++ fs/btrfs/volumes.c | 12 ++++++++++++ fs/btrfs/volumes.h | 2 ++ include/uapi/linux/btrfs.h | 1 + include/uapi/linux/btrfs_tree.h | 6 +++++- 6 files changed, 24 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index aa1b437fb951..923a8804ae94 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -57,9 +57,9 @@ struct btrfs_ref; * filesystem data as well that can be used to read data in order to repair * read errors on other disks. * - * Current value is derived from RAID1C3 with 3 copies. + * Current value is derived from RAID1C4 with 4 copies. */ -#define BTRFS_MAX_MIRRORS (3 + 1) +#define BTRFS_MAX_MIRRORS (4 + 1) #define BTRFS_MAX_LEVEL 8 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a5aff138e2e0..a98c3c71fc54 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1937,6 +1937,8 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, num_stripes = 2; else if (type & BTRFS_BLOCK_GROUP_RAID1C3) num_stripes = 3; + else if (type & BTRFS_BLOCK_GROUP_RAID1C4) + num_stripes = 4; else if (type & BTRFS_BLOCK_GROUP_RAID10) num_stripes = 4; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d17835fbd52f..9054203ae671 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -70,6 +70,18 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3, .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, }, + [BTRFS_RAID_RAID1C4] = { + .sub_stripes = 1, + .dev_stripes = 1, + .devs_max = 0, + .devs_min = 4, + .tolerated_failures = 3, + .devs_increment = 4, + .ncopies = 4, + .raid_name = "raid1c4", + .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4, + .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET, + }, [BTRFS_RAID_DUP] = { .sub_stripes = 1, .dev_stripes = 2, diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index a4e26b84e1b9..46987a2da786 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -547,6 +547,8 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags) return BTRFS_RAID_RAID1; else if (flags & BTRFS_BLOCK_GROUP_RAID1C3) return BTRFS_RAID_RAID1C3; + else if (flags & BTRFS_BLOCK_GROUP_RAID1C4) + return BTRFS_RAID_RAID1C4; else if (flags & BTRFS_BLOCK_GROUP_DUP) return BTRFS_RAID_DUP; else if (flags & BTRFS_BLOCK_GROUP_RAID0) diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 
ba22f91a3f5b..a2b761275bba 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -833,6 +833,7 @@ enum btrfs_err_code { BTRFS_ERROR_DEV_ONLY_WRITABLE, BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS, BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, + BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET, }; #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 52b2964b0311..8e322e2c7e78 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -842,6 +842,7 @@ struct btrfs_dev_replace_item { #define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7) #define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8) #define BTRFS_BLOCK_GROUP_RAID1C3 (1ULL << 9) +#define BTRFS_BLOCK_GROUP_RAID1C4 (1ULL << 10) #define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \ BTRFS_SPACE_INFO_GLOBAL_RSV) @@ -854,6 +855,7 @@ enum btrfs_raid_types { BTRFS_RAID_RAID5, BTRFS_RAID_RAID6, BTRFS_RAID_RAID1C3, + BTRFS_RAID_RAID1C4, BTRFS_NR_RAID_TYPES }; @@ -864,6 +866,7 @@ enum btrfs_raid_types { #define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ BTRFS_BLOCK_GROUP_RAID1 | \ BTRFS_BLOCK_GROUP_RAID1C3 | \ + BTRFS_BLOCK_GROUP_RAID1C4 | \ BTRFS_BLOCK_GROUP_RAID5 | \ BTRFS_BLOCK_GROUP_RAID6 | \ BTRFS_BLOCK_GROUP_DUP | \ @@ -872,7 +875,8 @@ enum btrfs_raid_types { BTRFS_BLOCK_GROUP_RAID6) #define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1 | \ - BTRFS_BLOCK_GROUP_RAID1C3) + BTRFS_BLOCK_GROUP_RAID1C3 | \ + BTRFS_BLOCK_GROUP_RAID1C4) /* * We need a bit for restriper to be able to tell when chunks of type -- cgit v1.2.3-59-g8ed1b From cfbb825c76198c9095428c5f9362fbf6ae06f417 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 10 Jul 2018 18:15:05 +0200 Subject: btrfs: add incompat for raid1 with 3, 4 copies The new raid1c3 and raid1c4 profiles are backward incompatible and the name shall be 'raid1c34', the status can be found in the global supported features in /sys/fs/btrfs/features or in the per-filesystem directory. 
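A minimal userspace check for the feature, using the sysfs path named above (the attribute file is called raid1c34, matching the feature bit):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* The file exists when the running kernel supports the new profiles. */
		if (access("/sys/fs/btrfs/features/raid1c34", F_OK) == 0)
			printf("raid1c34 supported\n");
		else
			printf("raid1c34 not supported\n");
		return 0;
	}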
Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 3 ++- fs/btrfs/sysfs.c | 2 ++ fs/btrfs/volumes.c | 9 +++++++++ include/uapi/linux/btrfs.h | 1 + 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 923a8804ae94..e76b3cda13e3 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -292,7 +292,8 @@ struct btrfs_super_block { BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \ BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \ BTRFS_FEATURE_INCOMPAT_NO_HOLES | \ - BTRFS_FEATURE_INCOMPAT_METADATA_UUID) + BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \ + BTRFS_FEATURE_INCOMPAT_RAID1C34) #define BTRFS_FEATURE_INCOMPAT_SAFE_SET \ (BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 786f4e4c6542..a1da1f4a511d 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -259,6 +259,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA); BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES); BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID); BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE); +BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34); static struct attribute *btrfs_supported_feature_attrs[] = { BTRFS_FEAT_ATTR_PTR(mixed_backref), @@ -273,6 +274,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = { BTRFS_FEAT_ATTR_PTR(no_holes), BTRFS_FEAT_ATTR_PTR(metadata_uuid), BTRFS_FEAT_ATTR_PTR(free_space_tree), + BTRFS_FEAT_ATTR_PTR(raid1c34), NULL }; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 9054203ae671..289c34f91996 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4717,6 +4717,14 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) btrfs_set_fs_incompat(info, RAID56); } +static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) +{ + if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) + return; + + btrfs_set_fs_incompat(info, RAID1C34); +} + static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 start, u64 type) { @@ -4983,6 +4991,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, free_extent_map(em); check_raid56_incompat_flag(info, type); + check_raid1c34_incompat_flag(info, type); kfree(devices_info); return 0; diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index a2b761275bba..7a8bc8b920f5 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -270,6 +270,7 @@ struct btrfs_ioctl_fs_info_args { #define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8) #define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9) #define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10) +#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11) struct btrfs_ioctl_feature_flags { __u64 compat_flags; -- cgit v1.2.3-59-g8ed1b From 9c907446dce311dc7e7b8958425e92ac03d3d00c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 31 Oct 2019 15:52:01 +0100 Subject: btrfs: drop incompat bit for raid1c34 after last block group is gone When there are no raid1c3 or raid1c4 block groups left after balance (either convert or with other filters applied), remove the incompat bit. This is already done for RAID56, do the same for RAID1C34. 
Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 1e521db3ef56..9ce9c2e318cf 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -828,27 +828,36 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) * * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group * in the whole filesystem + * + * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups */ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags) { - if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) { + bool found_raid56 = false; + bool found_raid1c34 = false; + + if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) || + (flags & BTRFS_BLOCK_GROUP_RAID1C3) || + (flags & BTRFS_BLOCK_GROUP_RAID1C4)) { struct list_head *head = &fs_info->space_info; struct btrfs_space_info *sinfo; list_for_each_entry_rcu(sinfo, head, list) { - bool found = false; - down_read(&sinfo->groups_sem); if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) - found = true; + found_raid56 = true; if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) - found = true; + found_raid56 = true; + if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3])) + found_raid1c34 = true; + if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4])) + found_raid1c34 = true; up_read(&sinfo->groups_sem); - - if (found) - return; } - btrfs_clear_fs_incompat(fs_info, RAID56); + if (found_raid56) + btrfs_clear_fs_incompat(fs_info, RAID56); + if (found_raid1c34) + btrfs_clear_fs_incompat(fs_info, RAID1C34); } } -- cgit v1.2.3-59-g8ed1b From f5c2a52590858d18f4a736d8062fe70f11988fa9 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 10 Oct 2019 23:29:21 +0200 Subject: btrfs: merge blocking_writers branches in btrfs_tree_read_lock There are two ifs that use eb::blocking_writers. As this is a variable modified inside and outside of locks, we could minimize number of accesses to avoid problems with getting different results at different times. The access here is locked so this can only race with btrfs_tree_unlock that sets blocking_writers to 0 without lock and unsets the lock owner. The first branch is taken only if the same thread already holds the lock, the second if checks for blocking writers. Here we'd either unlock and wait, or proceed. Both are valid states of the locking protocol. Reviewed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/locking.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 93146b495276..c84c650e56c7 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -128,20 +128,21 @@ again: read_lock(&eb->lock); BUG_ON(eb->blocking_writers == 0 && current->pid == eb->lock_owner); - if (eb->blocking_writers && current->pid == eb->lock_owner) { - /* - * This extent is already write-locked by our thread. We allow - * an additional read lock to be added because it's for the same - * thread. btrfs_find_all_roots() depends on this as it may be - * called on a partly (write-)locked tree. - */ - BUG_ON(eb->lock_nested); - eb->lock_nested = true; - read_unlock(&eb->lock); - trace_btrfs_tree_read_lock(eb, start_ns); - return; - } if (eb->blocking_writers) { + if (current->pid == eb->lock_owner) { + /* + * This extent is already write-locked by our thread. + * We allow an additional read lock to be added because + * it's for the same thread. 
btrfs_find_all_roots() + * depends on this as it may be called on a partly + * (write-)locked tree. + */ + BUG_ON(eb->lock_nested); + eb->lock_nested = true; + read_unlock(&eb->lock); + trace_btrfs_tree_read_lock(eb, start_ns); + return; + } read_unlock(&eb->lock); wait_event(eb->write_lock_wq, eb->blocking_writers == 0); -- cgit v1.2.3-59-g8ed1b From 40d38f53d476238594c24c677593913695e6dec7 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 10 Oct 2019 23:31:19 +0200 Subject: btrfs: set blocking_writers directly, no increment or decrement The increment and decrement were inherited from the previous version that used atomics, switched in commit 06297d8cefca ("btrfs: switch extent_buffer blocking_writers from atomic to int"). The only possible values are 0 and 1 so we can set them directly. The generated assembly (gcc 9.x) did the direct value assignment in btrfs_set_lock_blocking_write (asm diff after change in 06297d8cefca): 5d: test %eax,%eax 5f: je 62 61: retq - 62: lock incl 0x44(%rdi) - 66: add $0x50,%rdi - 6a: jmpq 6f + 62: movl $0x1,0x44(%rdi) + 69: add $0x50,%rdi + 6d: jmpq 72 The part in btrfs_tree_unlock did a decrement because BUG_ON(blockers > 1) is probably not a strong hint for the compiler, but otherwise the output looks safe: - lock decl 0x44(%rdi) + sub $0x1,%eax + mov %eax,0x44(%rdi) Signed-off-by: David Sterba --- fs/btrfs/locking.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index c84c650e56c7..00edf91c3d1c 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -109,7 +109,7 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb) if (eb->blocking_writers == 0) { btrfs_assert_spinning_writers_put(eb); btrfs_assert_tree_locked(eb); - eb->blocking_writers++; + eb->blocking_writers = 1; write_unlock(&eb->lock); } } @@ -305,7 +305,7 @@ void btrfs_tree_unlock(struct extent_buffer *eb) if (blockers) { btrfs_assert_no_spinning_writers(eb); - eb->blocking_writers--; + eb->blocking_writers = 0; /* * We need to order modifying blocking_writers above with * actually waking up the sleepers to ensure they see the -- cgit v1.2.3-59-g8ed1b From a4477988cfed18bdf1ad04d29b4b4a3c53269dfc Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 11 Oct 2019 00:03:14 +0200 Subject: btrfs: access eb::blocking_writers according to ACCESS_ONCE policies A nice writeup of the LKMM (Linux Kernel Memory Model) rules for access once policies can be found here https://lwn.net/Articles/799218/#Access-Marking%20Policies . The locked and unlocked access to eb::blocking_writers should be annotated accordingly, following this: Writes: - locked write must use ONCE, may use plain read - unlocked write must use ONCE Reads: - unlocked read must use ONCE - locked read may use plain read iff not mixed with unlocked read - unlocked read then locked must use ONCE There's one difference on the assembly level, where btrfs_tree_read_lock_atomic and btrfs_try_tree_read_lock used the cached value and did not reevaluate it after taking the lock. This could have missed some opportunities to take the lock in case blocking writers changed between the calls, but the window is just a few instructions long. As this is in try-lock, the callers handle that.
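As a rough illustration of the annotation policy above, here is a minimal userspace sketch (not btrfs code) of a flag that is written under a lock but also read and written without it. The struct and helper names are made up for this example, pthreads stands in for the kernel rwlock, and READ_ONCE/WRITE_ONCE are modeled with volatile accesses:

#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct eb_like {
	pthread_rwlock_t lock;
	int blocking_writers;		/* 0 or 1, mixed locked/unlocked access */
};

/* Unlocked fast path must use READ_ONCE and refetch the value after locking. */
static int try_read_lock(struct eb_like *eb)
{
	if (READ_ONCE(eb->blocking_writers))
		return 0;
	if (pthread_rwlock_tryrdlock(&eb->lock) != 0)
		return 0;
	/* Refetch value after lock, it may have changed in the meantime. */
	if (READ_ONCE(eb->blocking_writers)) {
		pthread_rwlock_unlock(&eb->lock);
		return 0;
	}
	return 1;
}

/* Locked write still uses WRITE_ONCE because unlocked readers exist. */
static void set_blocking(struct eb_like *eb)
{
	pthread_rwlock_wrlock(&eb->lock);
	WRITE_ONCE(eb->blocking_writers, 1);
	pthread_rwlock_unlock(&eb->lock);
}

int main(void)
{
	struct eb_like eb = { .lock = PTHREAD_RWLOCK_INITIALIZER, .blocking_writers = 0 };

	set_blocking(&eb);
	printf("while blocked: %d (expect 0)\n", try_read_lock(&eb));
	WRITE_ONCE(eb.blocking_writers, 0);	/* unlocked write also uses ONCE */
	if (try_read_lock(&eb)) {
		printf("after clearing: read lock taken\n");
		pthread_rwlock_unlock(&eb.lock);
	}
	return 0;
}

Built with cc -pthread, the sketch mirrors the try-lock behaviour described above: the unlocked check may act on a stale value, so the decision is confirmed only after the lock has been taken.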
Signed-off-by: David Sterba --- fs/btrfs/locking.c | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 00edf91c3d1c..4cd593a2f58c 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -109,7 +109,7 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb) if (eb->blocking_writers == 0) { btrfs_assert_spinning_writers_put(eb); btrfs_assert_tree_locked(eb); - eb->blocking_writers = 1; + WRITE_ONCE(eb->blocking_writers, 1); write_unlock(&eb->lock); } } @@ -145,7 +145,7 @@ again: } read_unlock(&eb->lock); wait_event(eb->write_lock_wq, - eb->blocking_writers == 0); + READ_ONCE(eb->blocking_writers) == 0); goto again; } btrfs_assert_tree_read_locks_get(eb); @@ -160,11 +160,12 @@ again: */ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) { - if (eb->blocking_writers) + if (READ_ONCE(eb->blocking_writers)) return 0; read_lock(&eb->lock); - if (eb->blocking_writers) { + /* Refetch value after lock */ + if (READ_ONCE(eb->blocking_writers)) { read_unlock(&eb->lock); return 0; } @@ -180,13 +181,14 @@ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) */ int btrfs_try_tree_read_lock(struct extent_buffer *eb) { - if (eb->blocking_writers) + if (READ_ONCE(eb->blocking_writers)) return 0; if (!read_trylock(&eb->lock)) return 0; - if (eb->blocking_writers) { + /* Refetch value after lock */ + if (READ_ONCE(eb->blocking_writers)) { read_unlock(&eb->lock); return 0; } @@ -202,11 +204,12 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb) */ int btrfs_try_tree_write_lock(struct extent_buffer *eb) { - if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) + if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) return 0; write_lock(&eb->lock); - if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) { + /* Refetch value after lock */ + if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) { write_unlock(&eb->lock); return 0; } @@ -277,9 +280,11 @@ void btrfs_tree_lock(struct extent_buffer *eb) WARN_ON(eb->lock_owner == current->pid); again: wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); - wait_event(eb->write_lock_wq, eb->blocking_writers == 0); + wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0); write_lock(&eb->lock); - if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) { + /* Refetch value after lock */ + if (atomic_read(&eb->blocking_readers) || + READ_ONCE(eb->blocking_writers)) { write_unlock(&eb->lock); goto again; } @@ -294,6 +299,10 @@ again: */ void btrfs_tree_unlock(struct extent_buffer *eb) { + /* + * This is read both locked and unlocked but always by the same thread + * that already owns the lock so we don't need to use READ_ONCE + */ int blockers = eb->blocking_writers; BUG_ON(blockers > 1); @@ -305,7 +314,8 @@ void btrfs_tree_unlock(struct extent_buffer *eb) if (blockers) { btrfs_assert_no_spinning_writers(eb); - eb->blocking_writers = 0; + /* Unlocked write */ + WRITE_ONCE(eb->blocking_writers, 0); /* * We need to order modifying blocking_writers above with * actually waking up the sleepers to ensure they see the -- cgit v1.2.3-59-g8ed1b From d4e253bbbc2036219807b07f6258027fc443670c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 16 Oct 2019 18:29:10 +0200 Subject: btrfs: document extent buffer locking Signed-off-by: David Sterba --- fs/btrfs/locking.c | 172 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 158 insertions(+), 14 
deletions(-) diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 4cd593a2f58c..571c4826c428 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -13,6 +13,110 @@ #include "extent_io.h" #include "locking.h" +/* + * Extent buffer locking + * ===================== + * + * The locks use a custom scheme that allows doing more operations than are + * available from current locking primitives. The building blocks are still + * rwlock and wait queues. + * + * Required semantics: + * + * - reader/writer exclusion + * - writer/writer exclusion + * - reader/reader sharing + * - spinning lock semantics + * - blocking lock semantics + * - try-lock semantics for readers and writers + * - one level nesting, allowing read lock to be taken by the same thread that + * already has write lock + * + * The extent buffer locks (also called tree locks) manage access to eb data + * related to the storage in the b-tree (keys, items, but not the individual + * members of eb). + * We want concurrency of many readers and safe updates. The underlying locking + * is done by read-write spinlock and the blocking part is implemented using + * counters and wait queues. + * + * spinning semantics - the low-level rwlock is held so all other threads that + * want to take it are spinning on it. + * + * blocking semantics - the low-level rwlock is not held but the counter + * denotes how many times the blocking lock was held; + * sleeping is possible + * + * Write lock always allows only one thread to access the data. + * + * + * Debugging + * --------- + * + * There are additional state counters that are asserted in various contexts, + * removed from non-debug build to reduce extent_buffer size and for + * performance reasons. + * + * + * Lock nesting + * ------------ + * + * A write operation on a tree might indirectly start a lookup on the same + * tree. This can happen when btrfs_cow_block locks the tree and needs to + * lookup free extents. + * + * btrfs_cow_block + * .. + * alloc_tree_block_no_bg_flush + * btrfs_alloc_tree_block + * btrfs_reserve_extent + * .. + * load_free_space_cache + * .. + * btrfs_lookup_file_extent + * btrfs_search_slot + * + * + * Locking pattern - spinning + * -------------------------- + * + * The simple locking scenario, the +--+ denotes the spinning section. + * + * +- btrfs_tree_lock + * | - extent_buffer::rwlock is held + * | - no heavy operations should happen, eg. IO, memory allocations, large + * | structure traversals + * +- btrfs_tree_unlock + * + * + * Locking pattern - blocking + * -------------------------- + * + * The blocking write uses the following scheme. The +--+ denotes the spinning + * section. + * + * +- btrfs_tree_lock + * | + * +- btrfs_set_lock_blocking_write + * + * - allowed: IO, memory allocations, etc. + * + * -- btrfs_tree_unlock - note, no explicit unblocking necessary + * + * + * Blocking read is similar. + * + * +- btrfs_tree_read_lock + * | + * +- btrfs_set_lock_blocking_read + * + * - heavy operations allowed + * + * +- btrfs_tree_read_unlock_blocking + * | + * +- btrfs_tree_read_unlock + * + */ + #ifdef CONFIG_BTRFS_DEBUG static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { @@ -80,6 +184,15 @@ static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { } static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { } #endif +/* + * Mark already held read lock as blocking. Can be nested in write lock by the + * same thread.
+ * + * Use when there are potentially long operations ahead so other threads waiting + * on the lock will not actively spin but sleep instead. + * + * The rwlock is released and blocking reader counter is increased. + */ void btrfs_set_lock_blocking_read(struct extent_buffer *eb) { trace_btrfs_set_lock_blocking_read(eb); @@ -96,6 +209,14 @@ void btrfs_set_lock_blocking_read(struct extent_buffer *eb) read_unlock(&eb->lock); } +/* + * Mark already held write lock as blocking. + * + * Use when there are potentially long operations ahead so other threads + * waiting on the lock will not actively spin but sleep instead. + * + * The rwlock is released and blocking writers is set. + */ void btrfs_set_lock_blocking_write(struct extent_buffer *eb) { trace_btrfs_set_lock_blocking_write(eb); @@ -115,8 +236,13 @@ } /* - * take a spinning read lock. This will wait for any blocking - * writers + * Lock the extent buffer for read. Wait for any writers (spinning or blocking). + * Can be nested in write lock by the same thread. + * + * Use when the locked section does only lightweight actions and busy waiting + * would be cheaper than making other threads do the wait/wake loop. + * + * The rwlock is held upon exit. */ void btrfs_tree_read_lock(struct extent_buffer *eb) { @@ -154,9 +280,10 @@ again: } /* - * take a spinning read lock. - * returns 1 if we get the read lock and 0 if we don't - * this won't wait for blocking writers + * Lock extent buffer for read, optimistically expecting that there are no + * contending blocking writers. If there are, don't wait. + * + * Return 1 if the rwlock has been taken, 0 otherwise */ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) { @@ -176,8 +303,9 @@ } /* - * returns 1 if we get the read lock and 0 if we don't - * this won't wait for blocking writers + * Try-lock for read. Don't block or wait for contending writers. + * + * Return 1 if the rwlock has been taken, 0 otherwise */ int btrfs_try_tree_read_lock(struct extent_buffer *eb) { @@ -199,8 +327,10 @@ } /* - * returns 1 if we get the read lock and 0 if we don't - * this won't wait for blocking writers or readers + * Try-lock for write. May block until the lock is uncontended, but does not + * wait until it is free. + * + * Return 1 if the rwlock has been taken, 0 otherwise */ int btrfs_try_tree_write_lock(struct extent_buffer *eb) { @@ -221,7 +351,10 @@ } /* - * drop a spinning read lock + * Release read lock. Must be used only if the lock is in spinning mode. If + * the read lock is nested, must pair with read lock before the write unlock. + * + * The rwlock is not held upon exit. */ void btrfs_tree_read_unlock(struct extent_buffer *eb) { @@ -243,7 +376,11 @@ } /* - * drop a blocking read lock + * Release read lock, previously set to blocking by a pairing call to + * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same + * thread. + * + * State of rwlock is unchanged, last reader wakes waiting threads. */ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) { @@ -267,8 +404,10 @@ } /* - * take a spinning write lock. This will wait for both - * blocking readers or writers + * Lock for write.
Wait for all blocking and spinning readers and writers. This + * starts context where reader lock could be nested by the same thread. + * + * The rwlock is held for write upon exit. */ void btrfs_tree_lock(struct extent_buffer *eb) { @@ -295,7 +434,12 @@ again: } /* - * drop a spinning or a blocking write lock. + * Release the write lock, either blocking or spinning (ie. there's no need + * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking). + * This also ends the context for nesting, the read lock must have been + * released already. + * + * Tasks blocked and waiting are woken, rwlock is not held upon exit. */ void btrfs_tree_unlock(struct extent_buffer *eb) { -- cgit v1.2.3-59-g8ed1b From ffb9e0f05fab5f7fe62c5d6964825e59e91be0db Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 10 Oct 2019 10:39:27 +0800 Subject: btrfs: block-group: Refactor btrfs_read_block_groups() Refactor the work inside the loop of btrfs_read_block_groups() into one separate function, read_one_block_group(). This allows read_one_block_group to be reused for later BG_TREE feature. The refactor does the following extra fix: - Use btrfs_fs_incompat() to replace open-coded feature check Reviewed-by: Johannes Thumshirn Reviewed-by: Anand Jain Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 219 ++++++++++++++++++++++++------------------------- 1 file changed, 108 insertions(+), 111 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 9ce9c2e318cf..1bde24d336ca 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1686,6 +1686,109 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) return ret; } +static int read_one_block_group(struct btrfs_fs_info *info, + struct btrfs_path *path, + int need_clear) +{ + struct extent_buffer *leaf = path->nodes[0]; + struct btrfs_block_group_cache *cache; + struct btrfs_space_info *space_info; + struct btrfs_key key; + struct btrfs_block_group_item bgi; + const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); + int slot = path->slots[0]; + int ret; + + btrfs_item_key_to_cpu(leaf, &key, slot); + ASSERT(key.type == BTRFS_BLOCK_GROUP_ITEM_KEY); + + cache = btrfs_create_block_group_cache(info, key.objectid, key.offset); + if (!cache) + return -ENOMEM; + + if (need_clear) { + /* + * When we mount with old space cache, we need to + * set BTRFS_DC_CLEAR and set dirty flag. + * + * a) Setting 'BTRFS_DC_CLEAR' makes sure that we + * truncate the old free space cache inode and + * setup a new one. + * b) Setting 'dirty flag' makes sure that we flush + * the new space cache info onto disk. + */ + if (btrfs_test_opt(info, SPACE_CACHE)) + cache->disk_cache_state = BTRFS_DC_CLEAR; + } + read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), + sizeof(bgi)); + cache->used = btrfs_stack_block_group_used(&bgi); + cache->flags = btrfs_stack_block_group_flags(&bgi); + if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && + (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { + btrfs_err(info, +"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", + cache->start); + ret = -EINVAL; + goto error; + } + + /* + * We need to exclude the super stripes now so that the space info has + * super bytes accounted for, otherwise we'll think we have more space + * than we actually do. + */ + ret = exclude_super_stripes(cache); + if (ret) { + /* We may have excluded something, so call this just in case. 
*/ + btrfs_free_excluded_extents(cache); + goto error; + } + + /* + * Check for two cases, either we are full, and therefore don't need + * to bother with the caching work since we won't find any space, or we + * are empty, and we can just add all the space in and be done with it. + * This saves us _a_lot_ of time, particularly in the full case. + */ + if (key.offset == cache->used) { + cache->last_byte_to_unpin = (u64)-1; + cache->cached = BTRFS_CACHE_FINISHED; + btrfs_free_excluded_extents(cache); + } else if (cache->used == 0) { + cache->last_byte_to_unpin = (u64)-1; + cache->cached = BTRFS_CACHE_FINISHED; + add_new_free_space(cache, key.objectid, + key.objectid + key.offset); + btrfs_free_excluded_extents(cache); + } + + ret = btrfs_add_block_group_cache(info, cache); + if (ret) { + btrfs_remove_free_space_cache(cache); + goto error; + } + trace_btrfs_add_block_group(info, cache, 0); + btrfs_update_space_info(info, cache->flags, key.offset, + cache->used, cache->bytes_super, &space_info); + + cache->space_info = space_info; + + link_block_group(cache); + + set_avail_alloc_bits(info, cache->flags); + if (btrfs_chunk_readonly(info, cache->start)) { + inc_block_group_ro(cache, 1); + } else if (cache->used == 0) { + ASSERT(list_empty(&cache->bg_list)); + btrfs_mark_bg_unused(cache); + } + return 0; +error: + btrfs_put_block_group(cache); + return ret; +} + int btrfs_read_block_groups(struct btrfs_fs_info *info) { struct btrfs_path *path; @@ -1693,15 +1796,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) struct btrfs_block_group_cache *cache; struct btrfs_space_info *space_info; struct btrfs_key key; - struct btrfs_key found_key; - struct extent_buffer *leaf; int need_clear = 0; u64 cache_gen; - u64 feature; - int mixed; - - feature = btrfs_super_incompat_flags(info->super_copy); - mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS); key.objectid = 0; key.offset = 0; @@ -1719,118 +1815,19 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) need_clear = 1; while (1) { - struct btrfs_block_group_item bgi; - ret = find_first_block_group(info, path, &key); if (ret > 0) break; if (ret != 0) goto error; - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - - cache = btrfs_create_block_group_cache(info, found_key.objectid, - found_key.offset); - if (!cache) { - ret = -ENOMEM; - goto error; - } - - if (need_clear) { - /* - * When we mount with old space cache, we need to - * set BTRFS_DC_CLEAR and set dirty flag. - * - * a) Setting 'BTRFS_DC_CLEAR' makes sure that we - * truncate the old free space cache inode and - * setup a new one. - * b) Setting 'dirty flag' makes sure that we flush - * the new space cache info onto disk. 
- */ - if (btrfs_test_opt(info, SPACE_CACHE)) - cache->disk_cache_state = BTRFS_DC_CLEAR; - } - - read_extent_buffer(leaf, &bgi, - btrfs_item_ptr_offset(leaf, path->slots[0]), - sizeof(bgi)); - /* cache::chunk_objectid is unused */ - cache->used = btrfs_stack_block_group_used(&bgi); - cache->flags = btrfs_stack_block_group_flags(&bgi); - if (!mixed && - ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && - (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { - btrfs_err(info, -"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", - cache->start); - btrfs_put_block_group(cache); - ret = -EINVAL; + btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); + ret = read_one_block_group(info, path, need_clear); + if (ret < 0) goto error; - } - - key.objectid = found_key.objectid + found_key.offset; + key.objectid += key.offset; + key.offset = 0; btrfs_release_path(path); - - /* - * We need to exclude the super stripes now so that the space - * info has super bytes accounted for, otherwise we'll think - * we have more space than we actually do. - */ - ret = exclude_super_stripes(cache); - if (ret) { - /* - * We may have excluded something, so call this just in - * case. - */ - btrfs_free_excluded_extents(cache); - btrfs_put_block_group(cache); - goto error; - } - - /* - * Check for two cases, either we are full, and therefore - * don't need to bother with the caching work since we won't - * find any space, or we are empty, and we can just add all - * the space in and be done with it. This saves us _a_lot_ of - * time, particularly in the full case. - */ - if (found_key.offset == cache->used) { - cache->last_byte_to_unpin = (u64)-1; - cache->cached = BTRFS_CACHE_FINISHED; - btrfs_free_excluded_extents(cache); - } else if (cache->used == 0) { - cache->last_byte_to_unpin = (u64)-1; - cache->cached = BTRFS_CACHE_FINISHED; - add_new_free_space(cache, found_key.objectid, - found_key.objectid + - found_key.offset); - btrfs_free_excluded_extents(cache); - } - - ret = btrfs_add_block_group_cache(info, cache); - if (ret) { - btrfs_remove_free_space_cache(cache); - btrfs_put_block_group(cache); - goto error; - } - - trace_btrfs_add_block_group(info, cache, 0); - btrfs_update_space_info(info, cache->flags, found_key.offset, - cache->used, - cache->bytes_super, &space_info); - - cache->space_info = space_info; - - link_block_group(cache); - - set_avail_alloc_bits(info, cache->flags); - if (btrfs_chunk_readonly(info, cache->start)) { - inc_block_group_ro(cache, 1); - } else if (cache->used == 0) { - ASSERT(list_empty(&cache->bg_list)); - btrfs_mark_bg_unused(cache); - } } list_for_each_entry_rcu(space_info, &info->space_info, list) { -- cgit v1.2.3-59-g8ed1b From d49a2ddb1568d533ef3b77841a3ea1ff5bfa8b98 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 5 Nov 2019 09:35:35 +0800 Subject: btrfs: block-group: Reuse the item key from caller of read_one_block_group() For read_one_block_group(), its only caller has already got the item key to search the next block group item. So we can use that key directly without doing our own conversion on stack. Also, since that key used in btrfs_read_block_groups() is vital for block group item search, add 'const' keyword for that parameter to prevent read_one_block_group() from modifying it.
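To make the contract above concrete, here is a small standalone C sketch (illustrative only, not btrfs code): the caller owns the search key, advances it after each item, and the worker receives it through a const pointer, so the compiler rejects any accidental modification.

#include <stdio.h>

struct key { unsigned long long objectid, offset; };

/* The const qualifier turns any write to the caller's key into a build error. */
static int read_one_item(const struct key *key)
{
	printf("item: objectid=%llu length=%llu\n", key->objectid, key->offset);
	/* key->objectid = 0; would not compile: assignment of read-only location */
	return 0;
}

int main(void)
{
	/* Pretend these keys were returned by a tree search. */
	const struct key found[] = { { 256, 1024 }, { 1280, 2048 } };
	struct key search = { 0, 0 };

	for (int i = 0; i < 2; i++) {
		search = found[i];
		if (read_one_item(&search))
			return 1;
		/*
		 * The caller, not the worker, advances the cursor; in the
		 * btrfs loop this advanced key seeds the next tree search.
		 */
		search.objectid += search.offset;
		search.offset = 0;
	}
	return 0;
}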
Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 1bde24d336ca..3ed853acfa05 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1688,21 +1688,20 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) static int read_one_block_group(struct btrfs_fs_info *info, struct btrfs_path *path, + const struct btrfs_key *key, int need_clear) { struct extent_buffer *leaf = path->nodes[0]; struct btrfs_block_group_cache *cache; struct btrfs_space_info *space_info; - struct btrfs_key key; struct btrfs_block_group_item bgi; const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); int slot = path->slots[0]; int ret; - btrfs_item_key_to_cpu(leaf, &key, slot); - ASSERT(key.type == BTRFS_BLOCK_GROUP_ITEM_KEY); + ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); - cache = btrfs_create_block_group_cache(info, key.objectid, key.offset); + cache = btrfs_create_block_group_cache(info, key->objectid, key->offset); if (!cache) return -ENOMEM; @@ -1751,15 +1750,15 @@ static int read_one_block_group(struct btrfs_fs_info *info, * are empty, and we can just add all the space in and be done with it. * This saves us _a_lot_ of time, particularly in the full case. */ - if (key.offset == cache->used) { + if (key->offset == cache->used) { cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; btrfs_free_excluded_extents(cache); } else if (cache->used == 0) { cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; - add_new_free_space(cache, key.objectid, - key.objectid + key.offset); + add_new_free_space(cache, key->objectid, + key->objectid + key->offset); btrfs_free_excluded_extents(cache); } @@ -1769,7 +1768,7 @@ static int read_one_block_group(struct btrfs_fs_info *info, goto error; } trace_btrfs_add_block_group(info, cache, 0); - btrfs_update_space_info(info, cache->flags, key.offset, + btrfs_update_space_info(info, cache->flags, key->offset, cache->used, cache->bytes_super, &space_info); cache->space_info = space_info; @@ -1822,7 +1821,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) goto error; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); - ret = read_one_block_group(info, path, need_clear); + ret = read_one_block_group(info, path, &key, need_clear); if (ret < 0) goto error; key.objectid += key.offset; -- cgit v1.2.3-59-g8ed1b From 32da5386d9a4fd5c1155cecf703df104d918954c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 29 Oct 2019 19:20:18 +0100 Subject: btrfs: rename btrfs_block_group_cache The type name is misleading, a single entry is named 'cache' while this normally means a collection of objects. Rename that everywhere. Also the identifier was quite long, making function prototypes harder to format. 
Suggested-by: Nikolay Borisov Reviewed-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 128 ++++++++++++++++----------------- fs/btrfs/block-group.h | 45 ++++++------ fs/btrfs/ctree.h | 14 ++-- fs/btrfs/disk-io.c | 8 +-- fs/btrfs/extent-tree.c | 67 +++++++++-------- fs/btrfs/free-space-cache.c | 71 +++++++++--------- fs/btrfs/free-space-cache.h | 39 +++++----- fs/btrfs/free-space-tree.c | 50 ++++++------- fs/btrfs/free-space-tree.h | 18 ++--- fs/btrfs/inode.c | 4 +- fs/btrfs/ioctl.c | 2 +- fs/btrfs/qgroup.c | 2 +- fs/btrfs/qgroup.h | 2 +- fs/btrfs/reada.c | 2 +- fs/btrfs/relocation.c | 13 ++-- fs/btrfs/scrub.c | 11 ++- fs/btrfs/space-info.c | 2 +- fs/btrfs/sysfs.c | 4 +- fs/btrfs/sysfs.h | 2 +- fs/btrfs/tests/btrfs-tests.c | 6 +- fs/btrfs/tests/btrfs-tests.h | 4 +- fs/btrfs/tests/free-space-tests.c | 15 ++-- fs/btrfs/tests/free-space-tree-tests.c | 26 +++---- fs/btrfs/transaction.c | 6 +- fs/btrfs/volumes.c | 6 +- include/trace/events/btrfs.h | 24 +++---- 26 files changed, 278 insertions(+), 293 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 3ed853acfa05..d96561d1ce90 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -120,12 +120,12 @@ u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) return get_alloc_profile(fs_info, orig_flags); } -void btrfs_get_block_group(struct btrfs_block_group_cache *cache) +void btrfs_get_block_group(struct btrfs_block_group *cache) { atomic_inc(&cache->count); } -void btrfs_put_block_group(struct btrfs_block_group_cache *cache) +void btrfs_put_block_group(struct btrfs_block_group *cache) { if (atomic_dec_and_test(&cache->count)) { WARN_ON(cache->pinned > 0); @@ -149,19 +149,18 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache) * This adds the block group to the fs_info rb tree for the block group cache */ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { struct rb_node **p; struct rb_node *parent = NULL; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; spin_lock(&info->block_group_cache_lock); p = &info->block_group_cache_tree.rb_node; while (*p) { parent = *p; - cache = rb_entry(parent, struct btrfs_block_group_cache, - cache_node); + cache = rb_entry(parent, struct btrfs_block_group, cache_node); if (block_group->start < cache->start) { p = &(*p)->rb_left; } else if (block_group->start > cache->start) { @@ -188,10 +187,10 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, * This will return the block group at or after bytenr if contains is 0, else * it will return the block group that contains the bytenr */ -static struct btrfs_block_group_cache *block_group_cache_tree_search( +static struct btrfs_block_group *block_group_cache_tree_search( struct btrfs_fs_info *info, u64 bytenr, int contains) { - struct btrfs_block_group_cache *cache, *ret = NULL; + struct btrfs_block_group *cache, *ret = NULL; struct rb_node *n; u64 end, start; @@ -199,8 +198,7 @@ static struct btrfs_block_group_cache *block_group_cache_tree_search( n = info->block_group_cache_tree.rb_node; while (n) { - cache = rb_entry(n, struct btrfs_block_group_cache, - cache_node); + cache = rb_entry(n, struct btrfs_block_group, cache_node); end = cache->start + cache->length - 1; start = cache->start; @@ -232,7 +230,7 @@ static struct btrfs_block_group_cache *block_group_cache_tree_search( /* * Return the block group that starts at or 
after bytenr */ -struct btrfs_block_group_cache *btrfs_lookup_first_block_group( +struct btrfs_block_group *btrfs_lookup_first_block_group( struct btrfs_fs_info *info, u64 bytenr) { return block_group_cache_tree_search(info, bytenr, 0); @@ -241,14 +239,14 @@ struct btrfs_block_group_cache *btrfs_lookup_first_block_group( /* * Return the block group that contains the given bytenr */ -struct btrfs_block_group_cache *btrfs_lookup_block_group( +struct btrfs_block_group *btrfs_lookup_block_group( struct btrfs_fs_info *info, u64 bytenr) { return block_group_cache_tree_search(info, bytenr, 1); } -struct btrfs_block_group_cache *btrfs_next_block_group( - struct btrfs_block_group_cache *cache) +struct btrfs_block_group *btrfs_next_block_group( + struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; struct rb_node *node; @@ -266,8 +264,7 @@ struct btrfs_block_group_cache *btrfs_next_block_group( node = rb_next(&cache->cache_node); btrfs_put_block_group(cache); if (node) { - cache = rb_entry(node, struct btrfs_block_group_cache, - cache_node); + cache = rb_entry(node, struct btrfs_block_group, cache_node); btrfs_get_block_group(cache); } else cache = NULL; @@ -277,7 +274,7 @@ struct btrfs_block_group_cache *btrfs_next_block_group( bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) { - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; bool ret = true; bg = btrfs_lookup_block_group(fs_info, bytenr); @@ -300,7 +297,7 @@ bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) { - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; bg = btrfs_lookup_block_group(fs_info, bytenr); ASSERT(bg); @@ -314,7 +311,7 @@ void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) btrfs_put_block_group(bg); } -void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg) +void btrfs_wait_nocow_writers(struct btrfs_block_group *bg) { wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); } @@ -322,7 +319,7 @@ void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg) void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, const u64 start) { - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; bg = btrfs_lookup_block_group(fs_info, start); ASSERT(bg); @@ -331,7 +328,7 @@ void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, btrfs_put_block_group(bg); } -void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg) +void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg) { struct btrfs_space_info *space_info = bg->space_info; @@ -357,7 +354,7 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg) } struct btrfs_caching_control *btrfs_get_caching_control( - struct btrfs_block_group_cache *cache) + struct btrfs_block_group *cache) { struct btrfs_caching_control *ctl; @@ -392,7 +389,7 @@ void btrfs_put_caching_control(struct btrfs_caching_control *ctl) * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using * any of the information in this block group. 
*/ -void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, +void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, u64 num_bytes) { struct btrfs_caching_control *caching_ctl; @@ -401,13 +398,13 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache if (!caching_ctl) return; - wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) || + wait_event(caching_ctl->wait, btrfs_block_group_done(cache) || (cache->free_space_ctl->free_space >= num_bytes)); btrfs_put_caching_control(caching_ctl); } -int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) +int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache) { struct btrfs_caching_control *caching_ctl; int ret = 0; @@ -416,7 +413,7 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) if (!caching_ctl) return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; - wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache)); + wait_event(caching_ctl->wait, btrfs_block_group_done(cache)); if (cache->cached == BTRFS_CACHE_ERROR) ret = -EIO; btrfs_put_caching_control(caching_ctl); @@ -424,7 +421,7 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) } #ifdef CONFIG_BTRFS_DEBUG -static void fragment_free_space(struct btrfs_block_group_cache *block_group) +static void fragment_free_space(struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; u64 start = block_group->start; @@ -450,8 +447,7 @@ static void fragment_free_space(struct btrfs_block_group_cache *block_group) * used yet since their free space will be released as soon as the transaction * commits. */ -u64 add_new_free_space(struct btrfs_block_group_cache *block_group, - u64 start, u64 end) +u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end) { struct btrfs_fs_info *info = block_group->fs_info; u64 extent_start, extent_end, size, total_added = 0; @@ -491,7 +487,7 @@ u64 add_new_free_space(struct btrfs_block_group_cache *block_group, static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) { - struct btrfs_block_group_cache *block_group = caching_ctl->block_group; + struct btrfs_block_group *block_group = caching_ctl->block_group; struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_root *extent_root = fs_info->extent_root; struct btrfs_path *path; @@ -626,7 +622,7 @@ out: static noinline void caching_thread(struct btrfs_work *work) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_fs_info *fs_info; struct btrfs_caching_control *caching_ctl; int ret; @@ -674,8 +670,7 @@ static noinline void caching_thread(struct btrfs_work *work) btrfs_put_block_group(block_group); } -int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, - int load_cache_only) +int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only) { DEFINE_WAIT(wait); struct btrfs_fs_info *fs_info = cache->fs_info; @@ -867,7 +862,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_path *path; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_free_cluster *cluster; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_key key; @@ -1185,7 +1180,7 @@ struct btrfs_trans_handle 
*btrfs_start_trans_remove_block_group( * data in this block group. That check should be done by relocation routine, * not this function. */ -static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) +static int inc_block_group_ro(struct btrfs_block_group *cache, int force) { struct btrfs_space_info *sinfo = cache->space_info; u64 num_bytes; @@ -1251,7 +1246,7 @@ out: */ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_space_info *space_info; struct btrfs_trans_handle *trans; int ret = 0; @@ -1265,7 +1260,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) int trimming; block_group = list_first_entry(&fs_info->unused_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, bg_list); list_del_init(&block_group->bg_list); @@ -1413,7 +1408,7 @@ next: spin_unlock(&fs_info->unused_bgs_lock); } -void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg) +void btrfs_mark_bg_unused(struct btrfs_block_group *bg) { struct btrfs_fs_info *fs_info = bg->fs_info; @@ -1521,7 +1516,7 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) write_sequnlock(&fs_info->profiles_lock); } -static int exclude_super_stripes(struct btrfs_block_group_cache *cache) +static int exclude_super_stripes(struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; u64 bytenr; @@ -1576,7 +1571,7 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) return 0; } -static void link_block_group(struct btrfs_block_group_cache *cache) +static void link_block_group(struct btrfs_block_group *cache) { struct btrfs_space_info *space_info = cache->space_info; int index = btrfs_bg_flags_to_raid_index(cache->flags); @@ -1592,10 +1587,10 @@ static void link_block_group(struct btrfs_block_group_cache *cache) btrfs_sysfs_add_block_group_type(cache); } -static struct btrfs_block_group_cache *btrfs_create_block_group_cache( +static struct btrfs_block_group *btrfs_create_block_group_cache( struct btrfs_fs_info *fs_info, u64 start, u64 size) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; cache = kzalloc(sizeof(*cache), GFP_NOFS); if (!cache) @@ -1640,7 +1635,7 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) { struct extent_map_tree *map_tree = &fs_info->mapping_tree; struct extent_map *em; - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; u64 start = 0; int ret = 0; @@ -1692,7 +1687,7 @@ static int read_one_block_group(struct btrfs_fs_info *info, int need_clear) { struct extent_buffer *leaf = path->nodes[0]; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_space_info *space_info; struct btrfs_block_group_item bgi; const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); @@ -1792,7 +1787,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) { struct btrfs_path *path; int ret; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_space_info *space_info; struct btrfs_key key; int need_clear = 0; @@ -1860,7 +1855,7 @@ error: void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_root *extent_root = fs_info->extent_root; struct btrfs_block_group_item item; struct btrfs_key key; @@ -1871,7 +1866,7 @@ 
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) while (!list_empty(&trans->new_bgs)) { block_group = list_first_entry(&trans->new_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, bg_list); if (ret) goto next; @@ -1906,7 +1901,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; int ret; btrfs_set_log_full_commit(trans); @@ -2022,7 +2017,7 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) return flags; } -int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache) +int btrfs_inc_block_group_ro(struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; @@ -2092,7 +2087,7 @@ out: return ret; } -void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) +void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) { struct btrfs_space_info *sinfo = cache->space_info; u64 num_bytes; @@ -2113,7 +2108,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) static int write_one_cache_group(struct btrfs_trans_handle *trans, struct btrfs_path *path, - struct btrfs_block_group_cache *cache) + struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = trans->fs_info; int ret; @@ -2148,7 +2143,7 @@ fail: } -static int cache_save_setup(struct btrfs_block_group_cache *block_group, +static int cache_save_setup(struct btrfs_block_group *block_group, struct btrfs_trans_handle *trans, struct btrfs_path *path) { @@ -2312,7 +2307,7 @@ out: int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache, *tmp; + struct btrfs_block_group *cache, *tmp; struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_path *path; @@ -2350,7 +2345,7 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_transaction *cur_trans = trans->transaction; int ret = 0; int should_put; @@ -2387,8 +2382,7 @@ again: while (!list_empty(&dirty)) { bool drop_reserve = true; - cache = list_first_entry(&dirty, - struct btrfs_block_group_cache, + cache = list_first_entry(&dirty, struct btrfs_block_group, dirty_list); /* * This can happen if something re-dirties a block group that @@ -2513,7 +2507,7 @@ again: int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_transaction *cur_trans = trans->transaction; int ret = 0; int should_put; @@ -2543,7 +2537,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) spin_lock(&cur_trans->dirty_bgs_lock); while (!list_empty(&cur_trans->dirty_bgs)) { cache = list_first_entry(&cur_trans->dirty_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, dirty_list); /* @@ -2625,7 +2619,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) * to use it without any locking */ while (!list_empty(io)) { - cache = list_first_entry(io, struct btrfs_block_group_cache, + cache = list_first_entry(io, struct btrfs_block_group, io_list); list_del_init(&cache->io_list); 
btrfs_wait_cache_io(trans, cache, path); @@ -2640,7 +2634,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, int alloc) { struct btrfs_fs_info *info = trans->fs_info; - struct btrfs_block_group_cache *cache = NULL; + struct btrfs_block_group *cache = NULL; u64 total = num_bytes; u64 old_val; u64 byte_in_group; @@ -2671,7 +2665,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, * is because we need the unpinning stage to actually add the * space back to the block group, otherwise we will leak space. */ - if (!alloc && !btrfs_block_group_cache_done(cache)) + if (!alloc && !btrfs_block_group_done(cache)) btrfs_cache_block_group(cache, 1); byte_in_group = bytenr - cache->start; @@ -2755,7 +2749,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, * reservation and the block group has become read only we cannot make the * reservation and return -EAGAIN, otherwise this function always succeeds. */ -int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, +int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, u64 ram_bytes, u64 num_bytes, int delalloc) { struct btrfs_space_info *space_info = cache->space_info; @@ -2791,7 +2785,7 @@ int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, * A and before transaction A commits you free that leaf, you call this with * reserve set to 0 in order to clear the reservation. */ -void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, +void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes, int delalloc) { struct btrfs_space_info *space_info = cache->space_info; @@ -3054,7 +3048,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) void btrfs_put_block_group_cache(struct btrfs_fs_info *info) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; u64 last = 0; while (1) { @@ -3094,7 +3088,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info) */ int btrfs_free_block_groups(struct btrfs_fs_info *info) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_space_info *space_info; struct btrfs_caching_control *caching_ctl; struct rb_node *n; @@ -3111,7 +3105,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) spin_lock(&info->unused_bgs_lock); while (!list_empty(&info->unused_bgs)) { block_group = list_first_entry(&info->unused_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, bg_list); list_del_init(&block_group->bg_list); btrfs_put_block_group(block_group); @@ -3120,7 +3114,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) spin_lock(&info->block_group_cache_lock); while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { - block_group = rb_entry(n, struct btrfs_block_group_cache, + block_group = rb_entry(n, struct btrfs_block_group, cache_node); rb_erase(&block_group->cache_node, &info->block_group_cache_tree); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 2ea580352aff..4e7afc028791 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -34,7 +34,7 @@ struct btrfs_caching_control { struct mutex mutex; wait_queue_head_t wait; struct btrfs_work work; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; u64 progress; refcount_t count; }; @@ -42,7 +42,7 @@ struct btrfs_caching_control { /* Once caching_thread() finds this much free space, it will wake up waiters. 
*/ #define CACHING_CTL_WAKE_UP SZ_2M -struct btrfs_block_group_cache { +struct btrfs_block_group { struct btrfs_fs_info *fs_info; struct inode *inode; spinlock_t lock; @@ -160,7 +160,7 @@ struct btrfs_block_group_cache { #ifdef CONFIG_BTRFS_DEBUG static inline int btrfs_should_fragment_free_space( - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; @@ -171,29 +171,29 @@ static inline int btrfs_should_fragment_free_space( } #endif -struct btrfs_block_group_cache *btrfs_lookup_first_block_group( +struct btrfs_block_group *btrfs_lookup_first_block_group( struct btrfs_fs_info *info, u64 bytenr); -struct btrfs_block_group_cache *btrfs_lookup_block_group( +struct btrfs_block_group *btrfs_lookup_block_group( struct btrfs_fs_info *info, u64 bytenr); -struct btrfs_block_group_cache *btrfs_next_block_group( - struct btrfs_block_group_cache *cache); -void btrfs_get_block_group(struct btrfs_block_group_cache *cache); -void btrfs_put_block_group(struct btrfs_block_group_cache *cache); +struct btrfs_block_group *btrfs_next_block_group( + struct btrfs_block_group *cache); +void btrfs_get_block_group(struct btrfs_block_group *cache); +void btrfs_put_block_group(struct btrfs_block_group *cache); void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, const u64 start); -void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg); +void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg); bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); -void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg); -void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, +void btrfs_wait_nocow_writers(struct btrfs_block_group *bg); +void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, u64 num_bytes); -int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache); -int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, +int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache); +int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only); void btrfs_put_caching_control(struct btrfs_caching_control *ctl); struct btrfs_caching_control *btrfs_get_caching_control( - struct btrfs_block_group_cache *cache); -u64 add_new_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *cache); +u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end); struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( struct btrfs_fs_info *fs_info, @@ -201,21 +201,21 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( int btrfs_remove_block_group(struct btrfs_trans_handle *trans, u64 group_start, struct extent_map *em); void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); -void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg); +void btrfs_mark_bg_unused(struct btrfs_block_group *bg); int btrfs_read_block_groups(struct btrfs_fs_info *info); int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size); void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); -int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache); -void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache); +int btrfs_inc_block_group_ro(struct btrfs_block_group 
*cache); +void btrfs_dec_block_group_ro(struct btrfs_block_group *cache); int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans); int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); int btrfs_setup_space_cache(struct btrfs_trans_handle *trans); int btrfs_update_block_group(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, int alloc); -int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, +int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, u64 ram_bytes, u64 num_bytes, int delalloc); -void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, +void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes, int delalloc); int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, enum btrfs_chunk_alloc_enum force); @@ -240,8 +240,7 @@ static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info) return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); } -static inline int btrfs_block_group_cache_done( - struct btrfs_block_group_cache *cache) +static inline int btrfs_block_group_done(struct btrfs_block_group *cache) { smp_mb(); return cache->cached == BTRFS_CACHE_FINISHED || diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e76b3cda13e3..b2e8fd8a8e59 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -39,7 +39,7 @@ struct btrfs_transaction; struct btrfs_pending_snapshot; struct btrfs_delayed_ref_root; struct btrfs_space_info; -struct btrfs_block_group_cache; +struct btrfs_block_group; extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; @@ -415,7 +415,7 @@ struct btrfs_free_cluster { /* We did a full search and couldn't create a cluster */ bool fragmented; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; /* * when a cluster is allocated from a block group, we put the * cluster onto a list in the block group so that it can @@ -478,8 +478,8 @@ struct btrfs_swapfile_pin { void *ptr; struct inode *inode; /* - * If true, ptr points to a struct btrfs_block_group_cache. Otherwise, - * ptr points to a struct btrfs_device. + * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr + * points to a struct btrfs_device. 
*/ bool is_block_group; }; @@ -2401,7 +2401,7 @@ static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info, int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info, u64 start, u64 num_bytes); -void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache); +void btrfs_free_excluded_extents(struct btrfs_block_group *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count); void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, @@ -2457,8 +2457,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref); int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); -void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache); -void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache); +void btrfs_get_block_group_trimming(struct btrfs_block_group *cache); +void btrfs_put_block_group_trimming(struct btrfs_block_group *cache); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); enum btrfs_reserve_flush_enum { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index ddb39f76f71a..8783d86f487d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4409,7 +4409,7 @@ again: return 0; } -static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) +static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache) { struct inode *inode; @@ -4426,12 +4426,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, struct btrfs_fs_info *fs_info) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; spin_lock(&cur_trans->dirty_bgs_lock); while (!list_empty(&cur_trans->dirty_bgs)) { cache = list_first_entry(&cur_trans->dirty_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, dirty_list); if (!list_empty(&cache->io_list)) { @@ -4459,7 +4459,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, */ while (!list_empty(&cur_trans->io_bgs)) { cache = list_first_entry(&cur_trans->io_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, io_list); list_del_init(&cache->io_list); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f9091bd41e99..153f71a5bba9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -54,7 +54,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, static int find_next_key(struct btrfs_path *path, int level, struct btrfs_key *key); -static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) +static int block_group_bits(struct btrfs_block_group *cache, u64 bits) { return (cache->flags & bits) == bits; } @@ -70,7 +70,7 @@ int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info, return 0; } -void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache) +void btrfs_free_excluded_extents(struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; u64 start, end; @@ -2537,7 +2537,7 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; int readonly = 0; block_group = btrfs_lookup_block_group(fs_info, bytenr); @@ -2567,7 +2567,7 @@ static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start) { - 
struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; u64 bytenr; spin_lock(&fs_info->block_group_cache_lock); @@ -2587,7 +2587,7 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start) return bytenr; } -static int pin_down_extent(struct btrfs_block_group_cache *cache, +static int pin_down_extent(struct btrfs_block_group *cache, u64 bytenr, u64 num_bytes, int reserved) { struct btrfs_fs_info *fs_info = cache->fs_info; @@ -2614,7 +2614,7 @@ static int pin_down_extent(struct btrfs_block_group_cache *cache, int btrfs_pin_extent(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, int reserved) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; ASSERT(fs_info->running_transaction); @@ -2633,7 +2633,7 @@ int btrfs_pin_extent(struct btrfs_fs_info *fs_info, int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; int ret; cache = btrfs_lookup_block_group(fs_info, bytenr); @@ -2660,7 +2660,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, u64 start, u64 num_bytes) { int ret; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_caching_control *caching_ctl; block_group = btrfs_lookup_block_group(fs_info, start); @@ -2672,7 +2672,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, if (!caching_ctl) { /* Logic error */ - BUG_ON(!btrfs_block_group_cache_done(block_group)); + BUG_ON(!btrfs_block_group_done(block_group)); ret = btrfs_remove_free_space(block_group, start, num_bytes); } else { mutex_lock(&caching_ctl->mutex); @@ -2737,7 +2737,7 @@ int btrfs_exclude_logged_extents(struct extent_buffer *eb) } static void -btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg) +btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) { atomic_inc(&bg->reservations); } @@ -2746,14 +2746,14 @@ void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info) { struct btrfs_caching_control *next; struct btrfs_caching_control *caching_ctl; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; down_write(&fs_info->commit_root_sem); list_for_each_entry_safe(caching_ctl, next, &fs_info->caching_block_groups, list) { cache = caching_ctl->block_group; - if (btrfs_block_group_cache_done(cache)) { + if (btrfs_block_group_done(cache)) { cache->last_byte_to_unpin = (u64)-1; list_del_init(&caching_ctl->list); btrfs_put_caching_control(caching_ctl); @@ -2805,7 +2805,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end, const bool return_free_space) { - struct btrfs_block_group_cache *cache = NULL; + struct btrfs_block_group *cache = NULL; struct btrfs_space_info *space_info; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; struct btrfs_free_cluster *cluster = NULL; @@ -2900,7 +2900,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *block_group, *tmp; + struct btrfs_block_group *block_group, *tmp; struct list_head *deleted_bgs; struct extent_io_tree *unpin; u64 start; @@ -3282,7 +3282,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, } if (last_ref && btrfs_header_generation(buf) == trans->transid) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; if 
(root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { ret = check_ref_cleanup(trans, buf->start); @@ -3369,15 +3369,14 @@ enum btrfs_loop_type { }; static inline void -btrfs_lock_block_group(struct btrfs_block_group_cache *cache, +btrfs_lock_block_group(struct btrfs_block_group *cache, int delalloc) { if (delalloc) down_read(&cache->data_rwsem); } -static inline void -btrfs_grab_block_group(struct btrfs_block_group_cache *cache, +static inline void btrfs_grab_block_group(struct btrfs_block_group *cache, int delalloc) { btrfs_get_block_group(cache); @@ -3385,12 +3384,12 @@ btrfs_grab_block_group(struct btrfs_block_group_cache *cache, down_read(&cache->data_rwsem); } -static struct btrfs_block_group_cache * -btrfs_lock_cluster(struct btrfs_block_group_cache *block_group, +static struct btrfs_block_group *btrfs_lock_cluster( + struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, int delalloc) { - struct btrfs_block_group_cache *used_bg = NULL; + struct btrfs_block_group *used_bg = NULL; spin_lock(&cluster->refill_lock); while (1) { @@ -3424,7 +3423,7 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group, } static inline void -btrfs_release_block_group(struct btrfs_block_group_cache *cache, +btrfs_release_block_group(struct btrfs_block_group *cache, int delalloc) { if (delalloc) @@ -3495,12 +3494,12 @@ struct find_free_extent_ctl { * Return >0 to inform caller that we find nothing * Return 0 means we have found a location and set ffe_ctl->found_offset. */ -static int find_free_extent_clustered(struct btrfs_block_group_cache *bg, +static int find_free_extent_clustered(struct btrfs_block_group *bg, struct btrfs_free_cluster *last_ptr, struct find_free_extent_ctl *ffe_ctl, - struct btrfs_block_group_cache **cluster_bg_ret) + struct btrfs_block_group **cluster_bg_ret) { - struct btrfs_block_group_cache *cluster_bg; + struct btrfs_block_group *cluster_bg; u64 aligned_cluster; u64 offset; int ret; @@ -3599,7 +3598,7 @@ refill_cluster: * Return 0 when we found an free extent and set ffe_ctrl->found_offset * Return -EAGAIN to inform caller that we need to re-search this block group */ -static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg, +static int find_free_extent_unclustered(struct btrfs_block_group *bg, struct btrfs_free_cluster *last_ptr, struct find_free_extent_ctl *ffe_ctl) { @@ -3801,7 +3800,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info, { int ret = 0; struct btrfs_free_cluster *last_ptr = NULL; - struct btrfs_block_group_cache *block_group = NULL; + struct btrfs_block_group *block_group = NULL; struct find_free_extent_ctl ffe_ctl = {0}; struct btrfs_space_info *space_info; bool use_cluster = true; @@ -3955,7 +3954,7 @@ search: } have_block_group: - ffe_ctl.cached = btrfs_block_group_cache_done(block_group); + ffe_ctl.cached = btrfs_block_group_done(block_group); if (unlikely(!ffe_ctl.cached)) { ffe_ctl.have_caching_bg = true; ret = btrfs_cache_block_group(block_group, 0); @@ -3971,7 +3970,7 @@ have_block_group: * lets look there */ if (last_ptr && use_cluster) { - struct btrfs_block_group_cache *cluster_bg = NULL; + struct btrfs_block_group *cluster_bg = NULL; ret = find_free_extent_clustered(block_group, last_ptr, &ffe_ctl, &cluster_bg); @@ -4153,7 +4152,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len, int pin, int delalloc) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; int ret = 0; cache = btrfs_lookup_block_group(fs_info, start); 
@@ -4386,7 +4385,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *fs_info = trans->fs_info; int ret; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_space_info *space_info; /* @@ -5500,7 +5499,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, */ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; u64 free_bytes = 0; int factor; @@ -5642,7 +5641,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed) */ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) { - struct btrfs_block_group_cache *cache = NULL; + struct btrfs_block_group *cache = NULL; struct btrfs_device *device; struct list_head *devices; u64 group_trimmed; @@ -5675,7 +5674,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) end = min(range_end, cache->start + cache->length); if (end - start >= range->minlen) { - if (!btrfs_block_group_cache_done(cache)) { + if (!btrfs_block_group_done(cache)) { ret = btrfs_cache_block_group(cache, 0); if (ret) { bg_failed++; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 279c41c4ba50..3283da419200 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -91,8 +91,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root, return inode; } -struct inode *lookup_free_space_inode( - struct btrfs_block_group_cache *block_group, +struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = block_group->fs_info; @@ -190,7 +189,7 @@ static int __create_free_space_inode(struct btrfs_root *root, } int create_free_space_inode(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { int ret; @@ -224,7 +223,7 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, } int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -820,7 +819,7 @@ free_cache: goto out; } -int load_free_space_cache(struct btrfs_block_group_cache *block_group) +int load_free_space_cache(struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -919,7 +918,7 @@ out: static noinline_for_stack int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl, struct btrfs_free_space_ctl *ctl, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, int *entries, int *bitmaps, struct list_head *bitmap_list) { @@ -1047,7 +1046,7 @@ fail: } static noinline_for_stack int write_pinned_extent_entries( - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_io_ctl *io_ctl, int *entries) { @@ -1146,7 +1145,7 @@ cleanup_write_cache_enospc(struct inode *inode, static int __btrfs_wait_cache_io(struct btrfs_root *root, struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_io_ctl *io_ctl, struct btrfs_path *path, u64 offset) { @@ -1215,7 +1214,7 @@ static int 
btrfs_wait_cache_io_root(struct btrfs_root *root, } int btrfs_wait_cache_io(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans, @@ -1236,7 +1235,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans, */ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, struct btrfs_free_space_ctl *ctl, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_io_ctl *io_ctl, struct btrfs_trans_handle *trans) { @@ -1374,7 +1373,7 @@ out_unlock: } int btrfs_write_out_cache(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -1652,7 +1651,7 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl, static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) { - struct btrfs_block_group_cache *block_group = ctl->private; + struct btrfs_block_group *block_group = ctl->private; u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; @@ -1996,7 +1995,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, static bool use_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { - struct btrfs_block_group_cache *block_group = ctl->private; + struct btrfs_block_group *block_group = ctl->private; struct btrfs_fs_info *fs_info = block_group->fs_info; bool forced = false; @@ -2048,7 +2047,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { struct btrfs_free_space *bitmap_info; - struct btrfs_block_group_cache *block_group = NULL; + struct btrfs_block_group *block_group = NULL; int added = 0; u64 bytes, offset, bytes_added; int ret; @@ -2385,7 +2384,7 @@ out: return ret; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, +int btrfs_add_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size) { return __btrfs_add_free_space(block_group->fs_info, @@ -2393,7 +2392,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, bytenr, size); } -int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, +int btrfs_remove_free_space(struct btrfs_block_group *block_group, u64 offset, u64 bytes) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -2483,7 +2482,7 @@ out: return ret; } -void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, +void btrfs_dump_free_space(struct btrfs_block_group *block_group, u64 bytes) { struct btrfs_fs_info *fs_info = block_group->fs_info; @@ -2508,7 +2507,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "%d blocks of free space at or bigger than bytes is", count); } -void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) +void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -2537,7 +2536,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) */ static int __btrfs_return_cluster_to_free_space( - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -2603,7 +2602,7 @@ void 
__btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) spin_unlock(&ctl->tree_lock); } -void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) +void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_cluster *cluster; @@ -2625,7 +2624,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) } -u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, +u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, u64 offset, u64 bytes, u64 empty_size, u64 *max_extent_size) { @@ -2679,7 +2678,7 @@ out: * cluster and remove the cluster from it. */ int btrfs_return_cluster_to_free_space( - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster) { struct btrfs_free_space_ctl *ctl; @@ -2713,7 +2712,7 @@ int btrfs_return_cluster_to_free_space( return ret; } -static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, +static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, struct btrfs_free_space *entry, u64 bytes, u64 min_start, @@ -2746,7 +2745,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, * if it couldn't find anything suitably large, or a logical disk offset * if things worked out */ -u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, +u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, u64 bytes, u64 min_start, u64 *max_extent_size) { @@ -2832,7 +2831,7 @@ out: return ret; } -static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, +static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group, struct btrfs_free_space *entry, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, @@ -2914,7 +2913,7 @@ again: * extent of cont1_bytes, and other clusters of at least min_bytes. */ static noinline int -setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, +setup_cluster_no_bitmap(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, struct list_head *bitmaps, u64 offset, u64 bytes, u64 cont1_bytes, u64 min_bytes) @@ -3005,7 +3004,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, * that we have already failed to find extents that will work. 
*/ static noinline int -setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, +setup_cluster_bitmap(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, struct list_head *bitmaps, u64 offset, u64 bytes, u64 cont1_bytes, u64 min_bytes) @@ -3055,7 +3054,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, * returns zero and sets up cluster if things worked out, otherwise * it returns -enospc */ -int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group, +int btrfs_find_space_cluster(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 empty_size) { @@ -3146,7 +3145,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) cluster->block_group = NULL; } -static int do_trimming(struct btrfs_block_group_cache *block_group, +static int do_trimming(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 bytes, u64 reserved_start, u64 reserved_bytes, struct btrfs_trim_range *trim_entry) @@ -3191,7 +3190,7 @@ static int do_trimming(struct btrfs_block_group_cache *block_group, return ret; } -static int trim_no_bitmap(struct btrfs_block_group_cache *block_group, +static int trim_no_bitmap(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 end, u64 minlen) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -3276,7 +3275,7 @@ out: return ret; } -static int trim_bitmaps(struct btrfs_block_group_cache *block_group, +static int trim_bitmaps(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 end, u64 minlen) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -3357,12 +3356,12 @@ next: return ret; } -void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache) +void btrfs_get_block_group_trimming(struct btrfs_block_group *cache) { atomic_inc(&cache->trimming); } -void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group) +void btrfs_put_block_group_trimming(struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; struct extent_map_tree *em_tree; @@ -3397,7 +3396,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group) } } -int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, +int btrfs_trim_block_group(struct btrfs_block_group *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen) { int ret; @@ -3595,7 +3594,7 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, * how the free space cache loading stuff works, so you can get really weird * configurations. */ -int test_add_free_space_entry(struct btrfs_block_group_cache *cache, +int test_add_free_space_entry(struct btrfs_block_group *cache, u64 offset, u64 bytes, bool bitmap) { struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; @@ -3663,7 +3662,7 @@ again: * just used to check the absence of space, so if there is free space in the * range at all we will return 1. 
*/ -int test_check_exists(struct btrfs_block_group_cache *cache, +int test_check_exists(struct btrfs_block_group *cache, u64 offset, u64 bytes) { struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 39c32c8fc24f..ba9a23241101 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -50,24 +50,23 @@ struct btrfs_io_ctl { unsigned check_crcs:1; }; -struct inode *lookup_free_space_inode( - struct btrfs_block_group_cache *block_group, +struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group, struct btrfs_path *path); int create_free_space_inode(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv); int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct inode *inode); -int load_free_space_cache(struct btrfs_block_group_cache *block_group); +int load_free_space_cache(struct btrfs_block_group *block_group); int btrfs_wait_cache_io(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); int btrfs_write_out_cache(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); struct inode *lookup_free_ino_inode(struct btrfs_root *root, struct btrfs_path *path); @@ -81,42 +80,40 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, struct btrfs_path *path, struct inode *inode); -void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); +void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group); int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, struct btrfs_free_space_ctl *ctl, u64 bytenr, u64 size); -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, +int btrfs_add_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size); -int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, +int btrfs_remove_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size); void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); -void btrfs_remove_free_space_cache(struct btrfs_block_group_cache - *block_group); -u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, +void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group); +u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, u64 offset, u64 bytes, u64 empty_size, u64 *max_extent_size); u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root); -void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, +void btrfs_dump_free_space(struct btrfs_block_group *block_group, u64 bytes); -int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group, +int btrfs_find_space_cluster(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 empty_size); void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster); -u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, +u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, u64 bytes, u64 min_start, u64 *max_extent_size); int 
btrfs_return_cluster_to_free_space( - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster); -int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, +int btrfs_trim_block_group(struct btrfs_block_group *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen); /* Support functions for running our sanity tests */ #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS -int test_add_free_space_entry(struct btrfs_block_group_cache *cache, +int test_add_free_space_entry(struct btrfs_block_group *cache, u64 offset, u64 bytes, bool bitmap); -int test_check_exists(struct btrfs_block_group_cache *cache, - u64 offset, u64 bytes); +int test_check_exists(struct btrfs_block_group *cache, u64 offset, u64 bytes); #endif #endif diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index d3aa65a4c76f..258cb3fae17a 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -13,10 +13,10 @@ #include "block-group.h" static int __add_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); -void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache) +void set_free_space_tree_thresholds(struct btrfs_block_group *cache) { u32 bitmap_range; size_t bitmap_size; @@ -44,7 +44,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache) } static int add_new_free_space_info(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_root *root = trans->fs_info->free_space_root; @@ -77,7 +77,7 @@ out: EXPORT_FOR_TESTS struct btrfs_free_space_info *search_free_space_info( struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, int cow) { struct btrfs_fs_info *fs_info = block_group->fs_info; @@ -179,7 +179,7 @@ static void le_bitmap_set(unsigned long *map, unsigned int start, int len) EXPORT_FOR_TESTS int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -319,7 +319,7 @@ out: EXPORT_FOR_TESTS int convert_free_space_to_extents(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -452,7 +452,7 @@ out: } static int update_free_space_extent_count(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, int new_extents) { @@ -490,7 +490,7 @@ out: } EXPORT_FOR_TESTS -int free_space_test_bit(struct btrfs_block_group_cache *block_group, +int free_space_test_bit(struct btrfs_block_group *block_group, struct btrfs_path *path, u64 offset) { struct extent_buffer *leaf; @@ -512,7 +512,7 @@ int free_space_test_bit(struct btrfs_block_group_cache *block_group, return !!extent_buffer_test_bit(leaf, ptr, i); } -static void free_space_set_bits(struct btrfs_block_group_cache *block_group, +static void free_space_set_bits(struct btrfs_block_group *block_group, struct btrfs_path *path, u64 *start, u64 *size, int bit) { @@ -580,7 +580,7 @@ static int free_space_next_bitmap(struct btrfs_trans_handle 
*trans, * the bitmap. */ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size, int remove) { @@ -693,7 +693,7 @@ out: } static int remove_free_space_extent(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { @@ -780,7 +780,7 @@ out: EXPORT_FOR_TESTS int __remove_from_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { struct btrfs_free_space_info *info; @@ -811,7 +811,7 @@ int __remove_from_free_space_tree(struct btrfs_trans_handle *trans, int remove_from_free_space_tree(struct btrfs_trans_handle *trans, u64 start, u64 size) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_path *path; int ret; @@ -845,7 +845,7 @@ out: } static int add_free_space_extent(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { @@ -973,7 +973,7 @@ out: EXPORT_FOR_TESTS int __add_to_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { struct btrfs_free_space_info *info; @@ -1004,7 +1004,7 @@ int __add_to_free_space_tree(struct btrfs_trans_handle *trans, int add_to_free_space_tree(struct btrfs_trans_handle *trans, u64 start, u64 size) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_path *path; int ret; @@ -1042,7 +1042,7 @@ out: * through the normal add/remove hooks. 
*/ static int populate_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { struct btrfs_root *extent_root = trans->fs_info->extent_root; struct btrfs_path *path, *path2; @@ -1139,7 +1139,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) struct btrfs_trans_handle *trans; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root *free_space_root; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct rb_node *node; int ret; @@ -1158,7 +1158,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) node = rb_first(&fs_info->block_group_cache_tree); while (node) { - block_group = rb_entry(node, struct btrfs_block_group_cache, + block_group = rb_entry(node, struct btrfs_block_group, cache_node); ret = populate_free_space_tree(trans, block_group); if (ret) @@ -1264,7 +1264,7 @@ abort: } static int __add_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { int ret; @@ -1281,7 +1281,7 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans, } int add_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_path *path = NULL; @@ -1311,7 +1311,7 @@ out: } int remove_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { struct btrfs_root *root = trans->fs_info->free_space_root; struct btrfs_path *path; @@ -1390,7 +1390,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, struct btrfs_path *path, u32 expected_extent_count) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_fs_info *fs_info; struct btrfs_root *root; struct btrfs_key key; @@ -1471,7 +1471,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, struct btrfs_path *path, u32 expected_extent_count) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_fs_info *fs_info; struct btrfs_root *root; struct btrfs_key key; @@ -1531,7 +1531,7 @@ out: int load_free_space_tree(struct btrfs_caching_control *caching_ctl) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_free_space_info *info; struct btrfs_path *path; u32 extent_count, flags; diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h index 360d50e1cdea..dc2463e4cfe3 100644 --- a/fs/btrfs/free-space-tree.h +++ b/fs/btrfs/free-space-tree.h @@ -16,14 +16,14 @@ struct btrfs_caching_control; #define BTRFS_FREE_SPACE_BITMAP_SIZE 256 #define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE) -void set_free_space_tree_thresholds(struct btrfs_block_group_cache *block_group); +void set_free_space_tree_thresholds(struct btrfs_block_group *block_group); int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info); int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info); int load_free_space_tree(struct btrfs_caching_control *caching_ctl); int add_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group); + struct btrfs_block_group *block_group); int 
remove_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group); + struct btrfs_block_group *block_group); int add_to_free_space_tree(struct btrfs_trans_handle *trans, u64 start, u64 size); int remove_from_free_space_tree(struct btrfs_trans_handle *trans, @@ -32,21 +32,21 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans, #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct btrfs_free_space_info * search_free_space_info(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, int cow); int __add_to_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size); int __remove_from_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size); int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); int convert_free_space_to_extents(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); -int free_space_test_bit(struct btrfs_block_group_cache *block_group, +int free_space_test_bit(struct btrfs_block_group *block_group, struct btrfs_path *path, u64 offset); #endif diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5b48830513e0..6e9183f58722 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3040,7 +3040,7 @@ out_kfree: static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, u64 start, u64 len) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; cache = btrfs_lookup_block_group(fs_info, start); ASSERT(cache); @@ -10837,7 +10837,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, start = 0; while (start < isize) { u64 logical_block_start, physical_block_start; - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; u64 len = isize - start; em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4cf255830bc5..a1ee0b775e65 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -4031,7 +4031,7 @@ out: static void get_block_group_info(struct list_head *groups_list, struct btrfs_ioctl_space_info *space) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; space->total_bytes = 0; space->used_bytes = 0; diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index f414fd914ddd..93aeb2e539a4 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -3823,7 +3823,7 @@ out: */ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *subvol_root, - struct btrfs_block_group_cache *bg, + struct btrfs_block_group *bg, struct extent_buffer *subvol_parent, int subvol_slot, struct extent_buffer *reloc_parent, int reloc_slot, u64 last_snapshot) diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 46ba7bd2961c..236f12224d52 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -408,7 +408,7 @@ void btrfs_qgroup_init_swapped_blocks( void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root); int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans, struct btrfs_root 
*subvol_root, - struct btrfs_block_group_cache *bg, + struct btrfs_block_group *bg, struct extent_buffer *subvol_parent, int subvol_slot, struct extent_buffer *reloc_parent, int reloc_slot, u64 last_snapshot); diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 907c5d79a197..243a2e44526e 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -227,7 +227,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical, struct btrfs_fs_info *fs_info = dev->fs_info; int ret; struct reada_zone *zone; - struct btrfs_block_group_cache *cache = NULL; + struct btrfs_block_group *cache = NULL; u64 start; u64 end; int i; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 231aedd0ec52..a857fc8271d2 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -147,7 +147,7 @@ struct file_extent_cluster { struct reloc_control { /* block group to relocate */ - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; /* extent tree */ struct btrfs_root *extent_root; /* inode for moving data */ @@ -1560,8 +1560,7 @@ again: return NULL; } -static int in_block_group(u64 bytenr, - struct btrfs_block_group_cache *block_group) +static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group) { if (bytenr >= block_group->start && bytenr < block_group->start + block_group->length) @@ -3544,7 +3543,7 @@ static int block_use_full_backref(struct reloc_control *rc, } static int delete_block_group_cache(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct inode *inode, u64 ino) { @@ -4219,7 +4218,7 @@ out: */ static noinline_for_stack struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *group) + struct btrfs_block_group *group) { struct inode *inode = NULL; struct btrfs_trans_handle *trans; @@ -4283,7 +4282,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) * Print the block group being relocated */ static void describe_relocation(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { char buf[128] = {'\0'}; @@ -4299,7 +4298,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info, */ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) { - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; struct btrfs_root *extent_root = fs_info->extent_root; struct reloc_control *rc; struct inode *inode; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 06494304ab80..e2c87220600f 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -389,8 +389,7 @@ static struct full_stripe_lock *search_full_stripe_lock( * * Caller must ensure @cache is a RAID56 block group. 
*/ -static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, - u64 bytenr) +static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr) { u64 ret; @@ -423,7 +422,7 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool *locked_ret) { - struct btrfs_block_group_cache *bg_cache; + struct btrfs_block_group *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *existing; u64 fstripe_start; @@ -470,7 +469,7 @@ out: static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool locked) { - struct btrfs_block_group_cache *bg_cache; + struct btrfs_block_group *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *fstripe_lock; u64 fstripe_start; @@ -3417,7 +3416,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 chunk_offset, u64 length, u64 dev_offset, - struct btrfs_block_group_cache *cache) + struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct extent_map_tree *map_tree = &fs_info->mapping_tree; @@ -3481,7 +3480,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, struct extent_buffer *l; struct btrfs_key key; struct btrfs_key found_key; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; path = btrfs_alloc_path(); diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 22b4968699e1..f09aa6ee9113 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -284,7 +284,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, struct btrfs_space_info *info, u64 bytes, int dump_block_groups) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; int index = 0; spin_lock(&info->lock); diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index a1da1f4a511d..5ebbe8a5ee76 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -397,7 +397,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj, { struct btrfs_space_info *sinfo = to_space_info(kobj->parent); - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; int index = btrfs_bg_flags_to_raid_index(to_raid_kobj(kobj)->flags); u64 val = 0; @@ -861,7 +861,7 @@ static void init_feature_attrs(void) * Create a sysfs entry for a given block group type at path * /sys/fs/btrfs/UUID/allocation/data/TYPE */ -void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache) +void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_space_info *space_info = cache->space_info; diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 610e9c36a94c..e10c3adfc30f 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -32,7 +32,7 @@ int __init btrfs_init_sysfs(void); void __cold btrfs_exit_sysfs(void); int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info); void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info); -void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache); +void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache); int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info); void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info); diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 
f4bb5e2a4ba5..a7aca4141788 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -202,11 +202,11 @@ void btrfs_free_dummy_root(struct btrfs_root *root) kfree(root); } -struct btrfs_block_group_cache * +struct btrfs_block_group * btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; cache = kzalloc(sizeof(*cache), GFP_KERNEL); if (!cache) @@ -232,7 +232,7 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, return cache; } -void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache) +void btrfs_free_dummy_block_group(struct btrfs_block_group *cache) { if (!cache) return; diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h index ee277bbd939b..9e52527357d8 100644 --- a/fs/btrfs/tests/btrfs-tests.h +++ b/fs/btrfs/tests/btrfs-tests.h @@ -41,9 +41,9 @@ struct inode *btrfs_new_test_inode(void); struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize); void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info); void btrfs_free_dummy_root(struct btrfs_root *root); -struct btrfs_block_group_cache * +struct btrfs_block_group * btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length); -void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache); +void btrfs_free_dummy_block_group(struct btrfs_block_group *cache); void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); #else diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index 43ec7060fcd2..aebdf23f0cdd 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c @@ -17,7 +17,7 @@ * entry and remove space from either end and the middle, and make sure we can * remove space that covers adjacent extent entries. */ -static int test_extents(struct btrfs_block_group_cache *cache) +static int test_extents(struct btrfs_block_group *cache) { int ret = 0; @@ -87,8 +87,7 @@ static int test_extents(struct btrfs_block_group_cache *cache) return 0; } -static int test_bitmaps(struct btrfs_block_group_cache *cache, - u32 sectorsize) +static int test_bitmaps(struct btrfs_block_group *cache, u32 sectorsize) { u64 next_bitmap_offset; int ret; @@ -156,7 +155,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache, } /* This is the high grade jackassery */ -static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache, +static int test_bitmaps_and_extents(struct btrfs_block_group *cache, u32 sectorsize) { u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize); @@ -331,7 +330,7 @@ static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl, /* Used by test_steal_space_from_bitmap_to_extent(). */ static int -check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache, +check_num_extents_and_bitmaps(const struct btrfs_block_group *cache, const int num_extents, const int num_bitmaps) { @@ -351,7 +350,7 @@ check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache, } /* Used by test_steal_space_from_bitmap_to_extent(). */ -static int check_cache_empty(struct btrfs_block_group_cache *cache) +static int check_cache_empty(struct btrfs_block_group *cache) { u64 offset; u64 max_extent_size; @@ -393,7 +392,7 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache) * requests. 
*/ static int -test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache, +test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache, u32 sectorsize) { int ret; @@ -829,7 +828,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache, int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize) { struct btrfs_fs_info *fs_info; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_root *root = NULL; int ret = -ENOMEM; diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index 188f08bd44b0..1a846bf6e197 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c @@ -18,7 +18,7 @@ struct free_space_extent { static int __check_free_space_extents(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, const struct free_space_extent * const extents, unsigned int num_extents) @@ -107,7 +107,7 @@ invalid: static int check_free_space_extents(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, const struct free_space_extent * const extents, unsigned int num_extents) @@ -150,7 +150,7 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans, static int test_empty_block_group(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { @@ -164,7 +164,7 @@ static int test_empty_block_group(struct btrfs_trans_handle *trans, static int test_remove_all(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { @@ -185,7 +185,7 @@ static int test_remove_all(struct btrfs_trans_handle *trans, static int test_remove_beginning(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { @@ -208,7 +208,7 @@ static int test_remove_beginning(struct btrfs_trans_handle *trans, static int test_remove_end(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { @@ -231,7 +231,7 @@ static int test_remove_end(struct btrfs_trans_handle *trans, static int test_remove_middle(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { @@ -255,7 +255,7 @@ static int test_remove_middle(struct btrfs_trans_handle *trans, static int test_merge_left(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { @@ -292,7 +292,7 @@ static int test_merge_left(struct btrfs_trans_handle *trans, static int test_merge_right(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { @@ -330,7 +330,7 @@ static int test_merge_right(struct btrfs_trans_handle *trans, static int 
test_merge_both(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { @@ -373,7 +373,7 @@ static int test_merge_both(struct btrfs_trans_handle *trans, static int test_merge_none(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { @@ -418,7 +418,7 @@ static int test_merge_none(struct btrfs_trans_handle *trans, typedef int (*test_func_t)(struct btrfs_trans_handle *, struct btrfs_fs_info *, - struct btrfs_block_group_cache *, + struct btrfs_block_group *, struct btrfs_path *, u32 alignment); @@ -427,7 +427,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize, { struct btrfs_fs_info *fs_info; struct btrfs_root *root = NULL; - struct btrfs_block_group_cache *cache = NULL; + struct btrfs_block_group *cache = NULL; struct btrfs_trans_handle trans; struct btrfs_path *path = NULL; int ret; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 6f133906c862..cfc08ef9b876 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -133,10 +133,10 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction) * discard the physical locations of the block groups. */ while (!list_empty(&transaction->deleted_bgs)) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; cache = list_first_entry(&transaction->deleted_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, bg_list); list_del_init(&cache->bg_list); btrfs_put_block_group_trimming(cache); @@ -1937,7 +1937,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err) static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *block_group, *tmp; + struct btrfs_block_group *block_group, *tmp; list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { btrfs_delayed_refs_rsv_release(fs_info, 1); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 289c34f91996..22a5bd991e47 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2997,7 +2997,7 @@ error: static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; u64 bytes_used; u64 chunk_type; @@ -3206,7 +3206,7 @@ static int chunk_profiles_filter(u64 chunk_type, static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, struct btrfs_balance_args *bargs) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; u64 chunk_used; u64 user_thresh_min; u64 user_thresh_max; @@ -3239,7 +3239,7 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, struct btrfs_balance_args *bargs) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; u64 chunk_used, user_thresh; int ret = 1; diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 070619891915..620bf1b38fba 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -19,7 +19,7 @@ struct btrfs_delayed_ref_node; struct btrfs_delayed_tree_ref; struct btrfs_delayed_data_ref; struct btrfs_delayed_ref_head; -struct btrfs_block_group_cache; +struct 
btrfs_block_group; struct btrfs_free_cluster; struct map_lookup; struct extent_buffer; @@ -699,7 +699,7 @@ TRACE_EVENT(btrfs_sync_fs, TRACE_EVENT(btrfs_add_block_group, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_block_group_cache *block_group, int create), + const struct btrfs_block_group *block_group, int create), TP_ARGS(fs_info, block_group, create), @@ -1184,7 +1184,7 @@ TRACE_EVENT(find_free_extent, DECLARE_EVENT_CLASS(btrfs__reserve_extent, - TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, + TP_PROTO(const struct btrfs_block_group *block_group, u64 start, u64 len), TP_ARGS(block_group, start, len), @@ -1214,7 +1214,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent, DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent, - TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, + TP_PROTO(const struct btrfs_block_group *block_group, u64 start, u64 len), TP_ARGS(block_group, start, len) @@ -1222,7 +1222,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent, DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster, - TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, + TP_PROTO(const struct btrfs_block_group *block_group, u64 start, u64 len), TP_ARGS(block_group, start, len) @@ -1230,7 +1230,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster, TRACE_EVENT(btrfs_find_cluster, - TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, + TP_PROTO(const struct btrfs_block_group *block_group, u64 start, u64 bytes, u64 empty_size, u64 min_bytes), TP_ARGS(block_group, start, bytes, empty_size, min_bytes), @@ -1263,7 +1263,7 @@ TRACE_EVENT(btrfs_find_cluster, TRACE_EVENT(btrfs_failed_cluster_setup, - TP_PROTO(const struct btrfs_block_group_cache *block_group), + TP_PROTO(const struct btrfs_block_group *block_group), TP_ARGS(block_group), @@ -1280,7 +1280,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup, TRACE_EVENT(btrfs_setup_cluster, - TP_PROTO(const struct btrfs_block_group_cache *block_group, + TP_PROTO(const struct btrfs_block_group *block_group, const struct btrfs_free_cluster *cluster, u64 size, int bitmap), @@ -1844,7 +1844,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents, ); DECLARE_EVENT_CLASS(btrfs__block_group, - TP_PROTO(const struct btrfs_block_group_cache *bg_cache), + TP_PROTO(const struct btrfs_block_group *bg_cache), TP_ARGS(bg_cache), @@ -1868,19 +1868,19 @@ DECLARE_EVENT_CLASS(btrfs__block_group, ); DEFINE_EVENT(btrfs__block_group, btrfs_remove_block_group, - TP_PROTO(const struct btrfs_block_group_cache *bg_cache), + TP_PROTO(const struct btrfs_block_group *bg_cache), TP_ARGS(bg_cache) ); DEFINE_EVENT(btrfs__block_group, btrfs_add_unused_block_group, - TP_PROTO(const struct btrfs_block_group_cache *bg_cache), + TP_PROTO(const struct btrfs_block_group *bg_cache), TP_ARGS(bg_cache) ); DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group, - TP_PROTO(const struct btrfs_block_group_cache *bg_cache), + TP_PROTO(const struct btrfs_block_group *bg_cache), TP_ARGS(bg_cache) ); -- cgit v1.2.3-59-g8ed1b From 0395d84f8edc6c28ce270e6eda5c7e6ff4c18545 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Wed, 13 Nov 2019 11:27:27 +0100 Subject: btrfs: change btrfs_fs_devices::seeding to bool struct btrfs_fs_devices::seeding currently is declared as an integer variable but only used as a boolean. Change the variable definition to bool and update the code touching it to set 'true' and 'false'.
Reviewed-by: Qu Wenruo Reviewed-by: Anand Jain Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 8 ++++---- fs/btrfs/volumes.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 22a5bd991e47..ade40df9b559 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -634,7 +634,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, } clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); - fs_devices->seeding = 1; + fs_devices->seeding = true; } else { if (bdev_read_only(bdev)) clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); @@ -1115,7 +1115,7 @@ static int close_fs_devices(struct btrfs_fs_devices *fs_devices) WARN_ON(fs_devices->open_devices); WARN_ON(fs_devices->rw_devices); fs_devices->opened = 0; - fs_devices->seeding = 0; + fs_devices->seeding = false; return 0; } @@ -2270,7 +2270,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info) list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); mutex_unlock(&fs_info->chunk_mutex); - fs_devices->seeding = 0; + fs_devices->seeding = false; fs_devices->num_devices = 0; fs_devices->open_devices = 0; fs_devices->missing_devices = 0; @@ -6654,7 +6654,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, if (IS_ERR(fs_devices)) return fs_devices; - fs_devices->seeding = 1; + fs_devices->seeding = true; fs_devices->opened = 1; return fs_devices; } diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 46987a2da786..8e9513b3fe9d 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -243,7 +243,7 @@ struct btrfs_fs_devices { struct list_head alloc_list; struct btrfs_fs_devices *seed; - int seeding; + bool seeding; int opened; -- cgit v1.2.3-59-g8ed1b From 7f0432d0d8dc1d29ab5276805d648c5b0a408105 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Wed, 13 Nov 2019 11:27:28 +0100 Subject: btrfs: change btrfs_fs_devices::rotating to bool struct btrfs_fs_devices::rotating currently is declared as an integer variable but only used as a boolean. Change the variable definition to bool and update the code touching it to set 'true' and 'false'.
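As a rough, self-contained sketch of the int-to-bool flag conversion applied by this patch and the previous one (the struct and function names below are invented for illustration and are not btrfs code):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: a flag that is only ever 0 or 1 reads better as bool. */
struct example_devices {
    bool seeding;   /* was: int seeding; */
    bool rotating;  /* was: int rotating; */
};

static void example_prepare_sprout(struct example_devices *devs)
{
    /* Assignments use true/false instead of 1/0. */
    devs->seeding = false;
}

int main(void)
{
    struct example_devices devs = { .seeding = true, .rotating = false };

    example_prepare_sprout(&devs);
    printf("seeding: %s, rotating: %s\n",
           devs.seeding ? "true" : "false",
           devs.rotating ? "true" : "false");
    return 0;
}

The callers only ever tested and assigned 0 or 1, so the conversion changes no behavior; it just makes the intent explicit in the type.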
Reviewed-by: Qu Wenruo Reviewed-by: Anand Jain Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 6 +++--- fs/btrfs/volumes.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ade40df9b559..d8e5560db285 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -644,7 +644,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, q = bdev_get_queue(bdev); if (!blk_queue_nonrot(q)) - fs_devices->rotating = 1; + fs_devices->rotating = true; device->bdev = bdev; clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); @@ -2274,7 +2274,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info) fs_devices->num_devices = 0; fs_devices->open_devices = 0; fs_devices->missing_devices = 0; - fs_devices->rotating = 0; + fs_devices->rotating = false; fs_devices->seed = seed_devices; generate_random_uuid(fs_devices->fsid); @@ -2469,7 +2469,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path atomic64_add(device->total_bytes, &fs_info->free_chunk_space); if (!blk_queue_nonrot(q)) - fs_devices->rotating = 1; + fs_devices->rotating = true; orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); btrfs_set_super_total_bytes(fs_info->super_copy, diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 8e9513b3fe9d..fc1b564b9cfe 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -250,7 +250,7 @@ struct btrfs_fs_devices { /* set when we find or add a device that doesn't have the * nonrot flag set */ - int rotating; + bool rotating; struct btrfs_fs_info *fs_info; /* sysfs kobjects */ -- cgit v1.2.3-59-g8ed1b From b12de52896c0e8213f70e3a168fde9e6eee95909 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 15 Nov 2019 10:09:00 +0800 Subject: btrfs: scrub: Don't check free space before marking a block group RO [BUG] When running btrfs/072 with only one online CPU, it has a pretty high chance to fail: btrfs/072 12s ... _check_dmesg: something found in dmesg (see xfstests-dev/results//btrfs/072.dmesg) - output mismatch (see xfstests-dev/results//btrfs/072.out.bad) --- tests/btrfs/072.out 2019-10-22 15:18:14.008965340 +0800 +++ /xfstests-dev/results//btrfs/072.out.bad 2019-11-14 15:56:45.877152240 +0800 @@ -1,2 +1,3 @@ QA output created by 072 Silence is golden +Scrub find errors in "-m dup -d single" test ... 
And with the following call trace: BTRFS info (device dm-5): scrub: started on devid 1 ------------[ cut here ]------------ BTRFS: Transaction aborted (error -27) WARNING: CPU: 0 PID: 55087 at fs/btrfs/block-group.c:1890 btrfs_create_pending_block_groups+0x3e6/0x470 [btrfs] CPU: 0 PID: 55087 Comm: btrfs Tainted: G W O 5.4.0-rc1-custom+ #13 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:btrfs_create_pending_block_groups+0x3e6/0x470 [btrfs] Call Trace: __btrfs_end_transaction+0xdb/0x310 [btrfs] btrfs_end_transaction+0x10/0x20 [btrfs] btrfs_inc_block_group_ro+0x1c9/0x210 [btrfs] scrub_enumerate_chunks+0x264/0x940 [btrfs] btrfs_scrub_dev+0x45c/0x8f0 [btrfs] btrfs_ioctl+0x31a1/0x3fb0 [btrfs] do_vfs_ioctl+0x636/0xaa0 ksys_ioctl+0x67/0x90 __x64_sys_ioctl+0x43/0x50 do_syscall_64+0x79/0xe0 entry_SYSCALL_64_after_hwframe+0x49/0xbe ---[ end trace 166c865cec7688e7 ]--- [CAUSE] The error number -27 is -EFBIG, returned from the following call chain: btrfs_end_transaction() |- __btrfs_end_transaction() |- btrfs_create_pending_block_groups() |- btrfs_finish_chunk_alloc() |- btrfs_add_system_chunk() This happens because we have used up all space of btrfs_super_block::sys_chunk_array. The root cause is that we have the following bad loop creating tons of SYSTEM chunks: 1. The only SYSTEM chunk is being scrubbed. It's very common to have only one SYSTEM chunk. 2. A new SYSTEM bg will be allocated, as btrfs_inc_block_group_ro() checks whether we have enough space after marking the current bg RO. If not, it allocates a new chunk. 3. The new SYSTEM bg is still empty and will be reclaimed. During the reclaim, we mark it RO again. 4. That newly allocated empty SYSTEM bg gets scrubbed. We go back to step 2, as the bg is already marked RO but not cleaned up yet. If the cleaner kthread doesn't get executed fast enough (e.g. only one CPU), then we will get more and more empty SYSTEM chunks, using up all the space of btrfs_super_block::sys_chunk_array. [FIX] Since scrub/dev-replace doesn't always need to allocate new extents, especially chunk tree extents, we don't really need to do chunk pre-allocation. To break the above spiral, introduce a new parameter to btrfs_inc_block_group_ro(), @do_chunk_alloc, which indicates whether we need extra chunk pre-allocation. For relocation, we pass @do_chunk_alloc=true, while for scrub, we pass @do_chunk_alloc=false. This should keep unnecessary empty chunks from popping up for scrub. Also, since btrfs_inc_block_group_ro() now takes two parameters, add more comments for it. Reviewed-by: Filipe Manana Signed-off-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 48 +++++++++++++++++++++++++++++++----------------- fs/btrfs/block-group.h | 3 ++- fs/btrfs/relocation.c | 2 +- fs/btrfs/scrub.c | 21 ++++++++++++++++++++- 4 files changed, 54 insertions(+), 20 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index d96561d1ce90..6934a5b8708f 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -2017,8 +2017,17 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) return flags; } -int btrfs_inc_block_group_ro(struct btrfs_block_group *cache) - +/* + * Mark one block group RO, can be called several times for the same block + * group. + * + * @cache: the destination block group + * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to + * ensure we still have some free space after marking this + * block group RO.
+ */ +int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, + bool do_chunk_alloc) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_trans_handle *trans; @@ -2048,25 +2057,29 @@ again: goto again; } - /* - * if we are changing raid levels, try to allocate a corresponding - * block group with the new raid level. - */ - alloc_flags = update_block_group_flags(fs_info, cache->flags); - if (alloc_flags != cache->flags) { - ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); + if (do_chunk_alloc) { /* - * ENOSPC is allowed here, we may have enough space - * already allocated at the new raid level to - * carry on + * If we are changing raid levels, try to allocate a + * corresponding block group with the new raid level. */ - if (ret == -ENOSPC) - ret = 0; - if (ret < 0) - goto out; + alloc_flags = update_block_group_flags(fs_info, cache->flags); + if (alloc_flags != cache->flags) { + ret = btrfs_chunk_alloc(trans, alloc_flags, + CHUNK_ALLOC_FORCE); + /* + * ENOSPC is allowed here, we may have enough space + * already allocated at the new raid level to carry on + */ + if (ret == -ENOSPC) + ret = 0; + if (ret < 0) + goto out; + } } - ret = inc_block_group_ro(cache, 0); + ret = inc_block_group_ro(cache, !do_chunk_alloc); + if (!do_chunk_alloc) + goto unlock_out; if (!ret) goto out; alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); @@ -2081,6 +2094,7 @@ out: check_system_chunk(trans, alloc_flags); mutex_unlock(&fs_info->chunk_mutex); } +unlock_out: mutex_unlock(&fs_info->ro_block_group_mutex); btrfs_end_transaction(trans); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 4e7afc028791..9b409676c4b2 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -206,7 +206,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info); int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size); void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); -int btrfs_inc_block_group_ro(struct btrfs_block_group *cache); +int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, + bool do_chunk_alloc); void btrfs_dec_block_group_ro(struct btrfs_block_group *cache); int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans); int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index a857fc8271d2..2e16701c6099 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4325,7 +4325,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) rc->extent_root = extent_root; rc->block_group = bg; - ret = btrfs_inc_block_group_ro(rc->block_group); + ret = btrfs_inc_block_group_ro(rc->block_group, true); if (ret) { err = ret; goto out; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index e2c87220600f..bd3f2266c5f4 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3559,7 +3559,26 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, * -> btrfs_scrub_pause() */ scrub_pause_on(fs_info); - ret = btrfs_inc_block_group_ro(cache); + + /* + * Don't do chunk preallocation for scrub. + * + * This is especially important for SYSTEM bgs, or we can hit + * -EFBIG from btrfs_finish_chunk_alloc() like: + * 1. The only SYSTEM bg is marked RO. + * Since SYSTEM bg is small, that's pretty common. + * 2. New SYSTEM bg will be allocated + * Due to regular version will allocate new chunk. + * 3. 
New SYSTEM bg is empty and will get cleaned up * Before cleanup really happens, it's marked RO again. * 4. Empty SYSTEM bg get scrubbed * We go back to 2. * * This can easily boost the amount of SYSTEM chunks if cleaner * thread can't be triggered fast enough, and use up all space * of btrfs_super_block::sys_chunk_array */ + ret = btrfs_inc_block_group_ro(cache, false); if (!ret && sctx->is_dev_replace) { /* * If we are doing a device replace wait for any tasks -- cgit v1.2.3-59-g8ed1b From 042528f8d840f42903d91d47e28c0e29da90c2d6 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 14 Nov 2019 18:02:43 +0000 Subject: Btrfs: fix block group remaining RO forever after error during device replace When doing a device replace, while at scrub.c:scrub_enumerate_chunks(), we set the block group to RO mode and then wait for any ongoing writes into extents of the block group to complete. While doing that wait we overwrite the value of the variable 'ret' and can break out of the loop if an error happens without turning the block group back into RW mode. So what happens is the following: 1) btrfs_inc_block_group_ro() returns 0, meaning it set the block group to RO mode (its ->ro field set to 1 or incremented to some value > 1); 2) Then btrfs_wait_ordered_roots() returns a value > 0; 3) Then if either joining or committing the transaction fails, we break out of the loop without calling btrfs_dec_block_group_ro(), leaving the block group in RO mode forever. To fix this, just remove the code that waits for ongoing writes to extents of the block group. It's not needed because, in the initial setup phase of a device replace operation, before starting to find all chunks and their extents, we set the target device for replace while holding fs_info->dev_replace->rwsem, which ensures that after releasing that semaphore, any writes into the source device are made to the target device as well (__btrfs_map_block() guarantees that). So while at scrub_enumerate_chunks() we only need to worry about finding and copying extents (from the source device to the target device) that were written before we started the device replace operation.
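To make the error path easier to see, here is a minimal userspace sketch (hypothetical stub functions, not the btrfs code) of the control flow described above: the return value of the RO transition is overwritten by the later wait, so an error in the transaction step breaks out without the matching call that would restore RW mode.

#include <stdio.h>

/* hypothetical stubs modelling the calls named in the changelog */
static int inc_block_group_ro(void)  { return 0; }   /* 0: group is now RO */
static void dec_block_group_ro(void) { puts("block group back to RW"); }
static long wait_ordered_roots(void) { return 1; }   /* > 0: ordered extents were waited on */
static long commit_transaction(void) { return -5; }  /* simulate a commit failure (-EIO) */

int main(void)
{
	long ret = inc_block_group_ro();   /* block group set to RO */

	ret = wait_ordered_roots();        /* bug: overwrites the RO transition result */
	if (ret > 0) {
		ret = commit_transaction();
		if (ret) {
			/* breaking out here skips dec_block_group_ro(),
			 * leaving the block group RO forever */
			fprintf(stderr, "error %ld, block group stays RO\n", ret);
			return 1;
		}
	}

	dec_block_group_ro();              /* only reached when nothing failed */
	return 0;
}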
Fixes: f0e9b7d6401959 ("Btrfs: fix race setting block group readonly during device replace") Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/ordered-data.c | 6 +----- fs/btrfs/ordered-data.h | 2 +- fs/btrfs/scrub.c | 39 --------------------------------------- 3 files changed, 2 insertions(+), 45 deletions(-) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 6240a5a1f2c0..fb09bc2f8e4d 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -572,12 +572,11 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, return count; } -u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, +void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, const u64 range_start, const u64 range_len) { struct btrfs_root *root; struct list_head splice; - u64 total_done = 0; u64 done; INIT_LIST_HEAD(&splice); @@ -597,7 +596,6 @@ u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, done = btrfs_wait_ordered_extents(root, nr, range_start, range_len); btrfs_put_fs_root(root); - total_done += done; spin_lock(&fs_info->ordered_root_lock); if (nr != U64_MAX) { @@ -607,8 +605,6 @@ u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, list_splice_tail(&splice, &fs_info->ordered_roots); spin_unlock(&fs_info->ordered_root_lock); mutex_unlock(&fs_info->ordered_operations_mutex); - - return total_done; } /* diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 5204171ea962..4eb0319a86d7 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -186,7 +186,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u8 *sum, int len); u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, const u64 range_start, const u64 range_len); -u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, +void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, const u64 range_start, const u64 range_len); void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree, struct btrfs_inode *inode, u64 start, diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index bd3f2266c5f4..21de630b0730 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3579,45 +3579,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, * of btrfs_super_block::sys_chunk_array */ ret = btrfs_inc_block_group_ro(cache, false); - if (!ret && sctx->is_dev_replace) { - /* - * If we are doing a device replace wait for any tasks - * that started delalloc right before we set the block - * group to RO mode, as they might have just allocated - * an extent from it or decided they could do a nocow - * write. And if any such tasks did that, wait for their - * ordered extents to complete and then commit the - * current transaction, so that we can later see the new - * extent items in the extent tree - the ordered extents - * create delayed data references (for cow writes) when - * they complete, which will be run and insert the - * corresponding extent items into the extent tree when - * we commit the transaction they used when running - * inode.c:btrfs_finish_ordered_io(). We later use - * the commit root of the extent tree to find extents - * to copy from the srcdev into the tgtdev, and we don't - * want to miss any new extents. 
- */ - btrfs_wait_block_group_reservations(cache); - btrfs_wait_nocow_writers(cache); - ret = btrfs_wait_ordered_roots(fs_info, U64_MAX, - cache->start, - cache->length); - if (ret > 0) { - struct btrfs_trans_handle *trans; - - trans = btrfs_join_transaction(root); - if (IS_ERR(trans)) - ret = PTR_ERR(trans); - else - ret = btrfs_commit_transaction(trans); - if (ret) { - scrub_pause_off(fs_info); - btrfs_put_block_group(cache); - break; - } - } - } scrub_pause_off(fs_info); if (ret == 0) { -- cgit v1.2.3-59-g8ed1b From 3e1740993e43116b3bc71b0aad1e6872f6ccf341 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 15 Nov 2019 15:43:06 -0500 Subject: btrfs: record all roots for rename exchange on a subvol Testing with the new fsstress support for subvolumes uncovered a pretty bad problem with rename exchange on subvolumes. We're modifying two different subvolumes, but we only start the transaction on one of them, so the other one is not added to the dirty root list. This is caught by btrfs_cow_block() with a warning because the root has not been updated; however, if we do not modify this root again, we'll end up pointing at an invalid root because the root item is never updated. Fix this by making sure we add the destination root to the trans list, the same as we do with normal renames. This fixes the corruption. Fixes: cdd1fedf8261 ("btrfs: add support for RENAME_EXCHANGE and RENAME_WHITEOUT") CC: stable@vger.kernel.org # 4.9+ Reviewed-by: Filipe Manana Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/inode.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6e9183f58722..b13c212b1bed 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -9582,6 +9582,9 @@ static int btrfs_rename_exchange(struct inode *old_dir, goto out_notrans; } + if (dest != root) + btrfs_record_root_in_trans(trans, dest); + /* * We need to find a free sequence number both in the source and * in the destination directory for the exchange. -- cgit v1.2.3-59-g8ed1b From 429aebc0a9a063667dba21244386f96e5b4d7330 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 18 Nov 2019 23:27:55 +0100 Subject: btrfs: get bdev directly from fs_devices in submit_extent_page This is a preparatory patch to remove the @bdev parameter from submit_extent_page. It can't be removed completely, because the cgroups need it for wbc when initializing the bio: wbc_init_bio -> bio_associate_blkg_from_css, which dereferences bdev->bi_disk->queue. The bdev pointer is the same as latest_bdev, thus no functional change. We can retrieve it from fs_devices, which is reachable through several dereferences. The local variable shadows the parameter, but that's only temporary.
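For illustration, a minimal userspace sketch of that dereference chain (simplified stand-in structures, not the kernel definitions): instead of threading the bdev through every caller, it is looked up from structures already reachable from the page's inode, and only on the writeback-cgroup path that still needs it.

#include <stdio.h>

/* simplified stand-ins for the kernel structures named above */
struct block_device { const char *name; };
struct fs_devices   { struct block_device *latest_bdev; };
struct fs_info      { struct fs_devices *fs_devices; };
struct inode_stub   { struct fs_info *fs_info; };  /* ~ BTRFS_I(inode)->root->fs_info */

static void submit_page(struct inode_stub *inode, int has_wbc)
{
	if (has_wbc) {
		/* only the cgroup/writeback path needs the bdev up front */
		struct block_device *bdev = inode->fs_info->fs_devices->latest_bdev;
		printf("wbc path: bio device set to %s\n", bdev->name);
	} else {
		printf("plain path: device set later, at real submission time\n");
	}
}

int main(void)
{
	struct block_device bdev = { "latest_bdev" };
	struct fs_devices devices = { &bdev };
	struct fs_info info = { &devices };
	struct inode_stub inode = { &info };

	submit_page(&inode, 1);
	submit_page(&inode, 0);
	return 0;
}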
Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ceb3c028894e..9a2339a54a9d 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2987,13 +2987,16 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree, } bio = btrfs_bio_alloc(offset); - bio_set_dev(bio, bdev); bio_add_page(bio, page, page_size, pg_offset); bio->bi_end_io = end_io_func; bio->bi_private = tree; bio->bi_write_hint = page->mapping->host->i_write_hint; bio->bi_opf = opf; if (wbc) { + struct block_device *bdev; + + bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev; + bio_set_dev(bio, bdev); wbc_init_bio(wbc, bio); wbc_account_cgroup_owner(wbc, page, page_size); } -- cgit v1.2.3-59-g8ed1b From 1a41802701ec78ca3272073e60463c13b17d121f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 30 Aug 2019 19:39:19 +0200 Subject: btrfs: drop bio_set_dev where not needed bio_set_dev sets a bdev on a bio; it is not only setting a pointer but also changing some state bits if there was a different bdev set before. This is one thing that's not needed. Another thing is that setting a bdev at bio allocation time is too early and actually does not work with plain redundancy profiles, where each time we submit a bio to a device, the bdev is set correctly. In many places the bio bdev is set to latest_bdev, which seems to serve as a stub pointer "just to put something into the bio". But we don't have to do that. Where do we know which bdev to set: * for regular IO: submit_stripe_bio that's called by btrfs_map_bio * repair IO: repair_io_failure, read or write from specific device * super block write (using buffer_heads but uses raw bdev) and barriers * scrub: this does not use all regular IO paths as it needs to reach all copies, verify and fixup eventually, and for that all bdev management is independent * raid56: rbio_add_io_page, for the RMW write * integrity-checker: does its own low-level block tracking Signed-off-by: David Sterba --- fs/btrfs/compression.c | 10 ---------- fs/btrfs/extent_io.c | 1 - 2 files changed, 11 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 53aee0db9d71..ee834ef7beb4 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -422,7 +422,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, int pg_index = 0; struct page *page; u64 first_byte = disk_start; - struct block_device *bdev; blk_status_t ret; int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; @@ -441,10 +440,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, cb->orig_bio = NULL; cb->nr_pages = nr_pages; - bdev = fs_info->fs_devices->latest_bdev; - bio = btrfs_bio_alloc(first_byte); - bio_set_dev(bio, bdev); bio->bi_opf = REQ_OP_WRITE | write_flags; bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; @@ -492,7 +488,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, } bio = btrfs_bio_alloc(first_byte); - bio_set_dev(bio, bdev); bio->bi_opf = REQ_OP_WRITE | write_flags; bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; @@ -660,7 +655,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, unsigned long nr_pages; unsigned long pg_index; struct page *page; - struct block_device *bdev; struct bio *comp_bio; u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9; u64 em_len; @@ -711,8 +705,6 @@
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, if (!cb->compressed_pages) goto fail1; - bdev = fs_info->fs_devices->latest_bdev; - for (pg_index = 0; pg_index < nr_pages; pg_index++) { cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM); @@ -731,7 +723,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->len = bio->bi_iter.bi_size; comp_bio = btrfs_bio_alloc(cur_disk_byte); - bio_set_dev(comp_bio, bdev); comp_bio->bi_opf = REQ_OP_READ; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; @@ -782,7 +773,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, } comp_bio = btrfs_bio_alloc(cur_disk_byte); - bio_set_dev(comp_bio, bdev); comp_bio->bi_opf = REQ_OP_READ; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9a2339a54a9d..c4fcfd7c8013 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2544,7 +2544,6 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, bio = btrfs_io_bio_alloc(1); bio->bi_end_io = endio_func; bio->bi_iter.bi_sector = failrec->logical >> 9; - bio_set_dev(bio, fs_info->fs_devices->latest_bdev); bio->bi_iter.bi_size = 0; bio->bi_private = data; -- cgit v1.2.3-59-g8ed1b From a019e9e197eaa68ffe2efeba00d685581b1a5416 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 30 Aug 2019 15:40:53 +0200 Subject: btrfs: remove extent_map::bdev We can now remove the bdev from extent_map. Previous patches made sure that bio_set_dev is called correctly in all places and that we don't need to grab it from latest_bdev or pass it around inside the extent map. Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 3 --- fs/btrfs/extent_io.c | 2 -- fs/btrfs/extent_map.c | 3 --- fs/btrfs/extent_map.h | 11 ++--------- fs/btrfs/file-item.c | 1 - fs/btrfs/file.c | 3 --- fs/btrfs/inode.c | 8 -------- fs/btrfs/relocation.c | 2 -- 8 files changed, 2 insertions(+), 31 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8783d86f487d..e0edfdc9c82b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -205,7 +205,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) { - struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; int ret; @@ -213,7 +212,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode, read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); if (em) { - em->bdev = fs_info->fs_devices->latest_bdev; read_unlock(&em_tree->lock); goto out; } @@ -228,7 +226,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode, em->len = (u64)-1; em->block_len = (u64)-1; em->block_start = 0; - em->bdev = fs_info->fs_devices->latest_bdev; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c4fcfd7c8013..586f9a10e863 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3154,7 +3154,6 @@ static int __do_readpage(struct extent_io_tree *tree, offset = em->block_start + extent_offset; disk_io_size = iosize; } - bdev = em->bdev; block_start = em->block_start; if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) block_start = EXTENT_MAP_HOLE; @@ -3491,7 +3490,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, iosize =
min(em_end - cur, end - cur + 1); iosize = ALIGN(iosize, blocksize); offset = em->block_start + extent_offset; - bdev = em->bdev; block_start = em->block_start; compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); free_extent_map(em); diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 9f99dccbc3ca..6f417ff68980 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -218,9 +218,6 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next) ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) && test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags)); - if (prev->bdev || next->bdev) - ASSERT(prev->bdev == next->bdev); - if (extent_map_end(prev) == next->start && prev->flags == next->flags && prev->map_lookup == next->map_lookup && diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 3eb9c596b445..8e217337dff9 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -42,15 +42,8 @@ struct extent_map { u64 block_len; u64 generation; unsigned long flags; - struct { - struct block_device *bdev; - - /* - * used for chunk mappings - * flags & EXTENT_FLAG_FS_MAPPING must be set - */ - struct map_lookup *map_lookup; - }; + /* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */ + struct map_lookup *map_lookup; refcount_t refs; unsigned int compress_type; struct list_head list; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 1a599f50837b..3270a40b0777 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -945,7 +945,6 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, u8 type = btrfs_file_extent_type(leaf, fi); int compress_type = btrfs_file_extent_compression(leaf, fi); - em->bdev = fs_info->fs_devices->latest_bdev; btrfs_item_key_to_cpu(leaf, &key, slot); extent_start = key.offset; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 32e620981485..0cb43b682789 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -667,7 +667,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, } split->generation = gen; - split->bdev = em->bdev; split->flags = flags; split->compress_type = em->compress_type; replace_extent_mapping(em_tree, em, split, modified); @@ -680,7 +679,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, split->start = start + len; split->len = em->start + em->len - (start + len); - split->bdev = em->bdev; split->flags = flags; split->compress_type = em->compress_type; split->generation = gen; @@ -2360,7 +2358,6 @@ out: hole_em->block_start = EXTENT_MAP_HOLE; hole_em->block_len = 0; hole_em->orig_block_len = 0; - hole_em->bdev = fs_info->fs_devices->latest_bdev; hole_em->compress_type = BTRFS_COMPRESS_NONE; hole_em->generation = trans->transid; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b13c212b1bed..56032c518b26 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5189,7 +5189,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) hole_em->block_len = 0; hole_em->orig_block_len = 0; hole_em->ram_bytes = hole_size; - hole_em->bdev = fs_info->fs_devices->latest_bdev; hole_em->compress_type = BTRFS_COMPRESS_NONE; hole_em->generation = fs_info->generation; @@ -6967,8 +6966,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); - if (em) - em->bdev = fs_info->fs_devices->latest_bdev; read_unlock(&em_tree->lock); if (em) { @@ -6984,7 +6981,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode 
*inode, err = -ENOMEM; goto out; } - em->bdev = fs_info->fs_devices->latest_bdev; em->start = EXTENT_MAP_HOLE; em->orig_start = EXTENT_MAP_HOLE; em->len = (u64)-1; @@ -7243,7 +7239,6 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, err = -ENOMEM; goto out; } - em->bdev = NULL; ASSERT(hole_em); /* @@ -7603,7 +7598,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, { struct extent_map_tree *em_tree; struct extent_map *em; - struct btrfs_root *root = BTRFS_I(inode)->root; int ret; ASSERT(type == BTRFS_ORDERED_PREALLOC || @@ -7621,7 +7615,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, em->len = len; em->block_len = block_len; em->block_start = block_start; - em->bdev = root->fs_info->fs_devices->latest_bdev; em->orig_block_len = orig_block_len; em->ram_bytes = ram_bytes; em->generation = -1; @@ -10484,7 +10477,6 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, em->block_len = ins.offset; em->orig_block_len = ins.offset; em->ram_bytes = ins.offset; - em->bdev = fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PREALLOC, &em->flags); em->generation = trans->transid; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 2e16701c6099..d897a8e5e430 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3194,7 +3194,6 @@ static noinline_for_stack int setup_extent_mapping(struct inode *inode, u64 start, u64 end, u64 block_start) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; int ret = 0; @@ -3207,7 +3206,6 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, em->len = end + 1 - start; em->block_len = em->len; em->block_start = block_start; - em->bdev = fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); lock_extent(&BTRFS_I(inode)->io_tree, start, end); -- cgit v1.2.3-59-g8ed1b From fa17ed069c61286b26382e23b57a62930657b9c1 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 3 Oct 2019 17:29:05 +0200 Subject: btrfs: drop bdev argument from submit_extent_page After the previous patches removed the bdev that was passed around just to be set on the bio, it has become unused in submit_extent_page. So it now has "only" 13 parameters.
Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 586f9a10e863..eb8bd0258360 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2929,7 +2929,6 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size) * a contiguous page to the previous one * @size: portion of page that we want to write * @offset: starting offset in the page - * @bdev: attach newly created bios to this bdev * @bio_ret: must be valid pointer, newly allocated bio will be stored there * @end_io_func: end_io callback for new bio * @mirror_num: desired mirror to read/write @@ -2940,7 +2939,6 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree, struct writeback_control *wbc, struct page *page, u64 offset, size_t size, unsigned long pg_offset, - struct block_device *bdev, struct bio **bio_ret, bio_end_io_t end_io_func, int mirror_num, @@ -3077,7 +3075,6 @@ static int __do_readpage(struct extent_io_tree *tree, u64 block_start; u64 cur_end; struct extent_map *em; - struct block_device *bdev; int ret = 0; int nr = 0; size_t pg_offset = 0; @@ -3243,7 +3240,7 @@ static int __do_readpage(struct extent_io_tree *tree, ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL, page, offset, disk_io_size, - pg_offset, bdev, bio, + pg_offset, bio, end_bio_extent_readpage, mirror_num, *bio_flags, this_bio_flag, @@ -3431,7 +3428,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, u64 block_start; u64 iosize; struct extent_map *em; - struct block_device *bdev; size_t pg_offset = 0; size_t blocksize; int ret = 0; @@ -3531,7 +3527,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc, page, offset, iosize, pg_offset, - bdev, &epd->bio, + &epd->bio, end_bio_extent_writepage, 0, 0, 0, false); if (ret) { @@ -3857,7 +3853,6 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, struct extent_page_data *epd) { struct btrfs_fs_info *fs_info = eb->fs_info; - struct block_device *bdev = fs_info->fs_devices->latest_bdev; struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree; u64 offset = eb->start; u32 nritems; @@ -3892,7 +3887,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, clear_page_dirty_for_io(p); set_page_writeback(p); ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc, - p, offset, PAGE_SIZE, 0, bdev, + p, offset, PAGE_SIZE, 0, &epd->bio, end_bio_extent_buffer_writepage, 0, 0, 0, false); -- cgit v1.2.3-59-g8ed1b