From 0e56315ca147b3e60c7bf240233a301d3c7fb508 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 19 Nov 2019 12:07:33 +0000 Subject: Btrfs: fix missing hole after hole punching and fsync when using NO_HOLES When using the NO_HOLES feature, if we punch a hole into a file and then fsync it, there are cases where a subsequent fsync will miss the fact that a hole was punched, resulting in the holes not existing after replaying the log tree. Essentially these cases all imply that tree-log.c:copy_items() is not invoked for the leafs that delimit holes, because nothing changed those leafs in the current transaction. And it's precisely copy_items() where we currently detect and log holes, which works as long as the holes are between file extent items in the input leaf, between the beginning of the input leaf and the previous leaf, or between the last item in the leaf and the next leaf. First example where we miss a hole: *) The extent items of the inode span multiple leafs; *) The punched hole covers a range that affects only the extent items of the first leaf; *) The fsync operation is done in full mode (BTRFS_INODE_NEEDS_FULL_SYNC is set in the inode's runtime flags). That results in the hole not existing after replaying the log tree. For example, if the fs/subvolume tree has the following layout for a particular inode: Leaf N, generation 10: [ ... INODE_ITEM INODE_REF EXTENT_ITEM (0 64K) EXTENT_ITEM (64K 128K) ] Leaf N + 1, generation 10: [ EXTENT_ITEM (128K 64K) ... ] If at transaction 11 we punch a hole covering the range [0, 128K[, we end up dropping the two extent items from leaf N, but we don't touch the other leaf, so we end up in the following state: Leaf N, generation 11: [ ... INODE_ITEM INODE_REF ] Leaf N + 1, generation 10: [ EXTENT_ITEM (128K 64K) ... ] A full fsync after punching the hole will only process leaf N because it was modified in the current transaction, but not leaf N + 1, since it was not modified in the current transaction (generation 10 and not 11). As a result the fsync will not log any holes, because it didn't process any leaf with extent items. Second example where we will miss a hole: *) An inode has its items spanning 5 (or more) leafs; *) A hole is punched and it covers only the extent items of the 3rd leaf. This results in deleting the entire leaf and not touching any of the other leafs. So the only leaf that is modified in the current transaction, when punching the hole, is the first leaf, which contains the inode item. During the full fsync, the only leaf that is passed to copy_items() is that first leaf, and that's not enough for the hole detection code in copy_items() to determine there's a hole between the last file extent item in the 2nd leaf and the first file extent item in the 3rd leaf (which was the 4th leaf before punching the hole). Fix this by scanning all leafs and punching holes as necessary when doing a full fsync (less common than a non-full fsync) when the NO_HOLES feature is enabled. The lack of explicit file extent items to mark holes makes it necessary to scan existing extents to determine if holes exist. A test case for fstests follows soon.
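[Editor's note] The scenario above can be exercised from user space with a plain hole punch followed by an fsync. Below is a minimal, hypothetical reproducer sketch (not the fstests test case mentioned above); the mount point, file name and sizes are illustrative, it assumes a btrfs filesystem created with the NO_HOLES feature (mkfs.btrfs -O no-holes), and whether it actually hits the bug also depends on the resulting leaf layout and on the fsync running in full mode:

/* Hypothetical reproducer sketch for the missed hole case described above. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/mnt/foo", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 0xaa, sizeof(buf));
	/* Write 192K of data so the file gets several extent items. */
	for (int i = 0; i < 48; i++) {
		if (pwrite(fd, buf, sizeof(buf), (off_t)i * sizeof(buf)) < 0) {
			perror("pwrite");
			return 1;
		}
	}
	fsync(fd);	/* persist the extents */
	/*
	 * Punch a hole over the first 128K, dropping extent items from the
	 * leading leaf(s) while leaving the remaining leafs untouched.
	 */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 128 * 1024) < 0) {
		perror("fallocate");
		return 1;
	}
	/*
	 * Before the fix, a power failure after this fsync could bring the
	 * punched range back on log replay.
	 */
	fsync(fd);
	close(fd);
	return 0;
}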
Fixes: 16e7549f045d33 ("Btrfs: incompatible format change to remove hole extents") CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Josef Bacik Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 388 ++++++++++++++-------------------------------------- 1 file changed, 100 insertions(+), 288 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index d3f115909ff0..44480762003c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3935,7 +3935,7 @@ static int log_csums(struct btrfs_trans_handle *trans, static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *dst_path, - struct btrfs_path *src_path, u64 *last_extent, + struct btrfs_path *src_path, int start_slot, int nr, int inode_only, u64 logged_isize) { @@ -3946,7 +3946,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_file_extent_item *extent; struct btrfs_inode_item *inode_item; struct extent_buffer *src = src_path->nodes[0]; - struct btrfs_key first_key, last_key, key; int ret; struct btrfs_key *ins_keys; u32 *ins_sizes; @@ -3954,9 +3953,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, int i; struct list_head ordered_sums; int skip_csum = inode->flags & BTRFS_INODE_NODATASUM; - bool has_extents = false; - bool need_find_last_extent = true; - bool done = false; INIT_LIST_HEAD(&ordered_sums); @@ -3965,8 +3961,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, if (!ins_data) return -ENOMEM; - first_key.objectid = (u64)-1; - ins_sizes = (u32 *)ins_data; ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); @@ -3987,9 +3981,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, src_offset = btrfs_item_ptr_offset(src, start_slot + i); - if (i == nr - 1) - last_key = ins_keys[i]; - if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { inode_item = btrfs_item_ptr(dst_path->nodes[0], dst_path->slots[0], @@ -4003,20 +3994,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, src_offset, ins_sizes[i]); } - /* - * We set need_find_last_extent here in case we know we were - * processing other items and then walk into the first extent in - * the inode. If we don't hit an extent then nothing changes, - * we'll do the last search the next time around. - */ - if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { - has_extents = true; - if (first_key.objectid == (u64)-1) - first_key = ins_keys[i]; - } else { - need_find_last_extent = false; - } - /* take a reference on file data extents so that truncates * or deletes of this inode don't have to relog the inode * again @@ -4082,167 +4059,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, kfree(sums); } - if (!has_extents) - return ret; - - if (need_find_last_extent && *last_extent == first_key.offset) { - /* - * We don't have any leafs between our current one and the one - * we processed before that can have file extent items for our - * inode (and have a generation number smaller than our current - * transaction id). - */ - need_find_last_extent = false; - } - - /* - * Because we use btrfs_search_forward we could skip leaves that were - * not modified and then assume *last_extent is valid when it really - * isn't. So back up to the previous leaf and read the end of the last - * extent before we go and fill in holes. 
- */ - if (need_find_last_extent) { - u64 len; - - ret = btrfs_prev_leaf(inode->root, src_path); - if (ret < 0) - return ret; - if (ret) - goto fill_holes; - if (src_path->slots[0]) - src_path->slots[0]--; - src = src_path->nodes[0]; - btrfs_item_key_to_cpu(src, &key, src_path->slots[0]); - if (key.objectid != btrfs_ino(inode) || - key.type != BTRFS_EXTENT_DATA_KEY) - goto fill_holes; - extent = btrfs_item_ptr(src, src_path->slots[0], - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(src, extent) == - BTRFS_FILE_EXTENT_INLINE) { - len = btrfs_file_extent_ram_bytes(src, extent); - *last_extent = ALIGN(key.offset + len, - fs_info->sectorsize); - } else { - len = btrfs_file_extent_num_bytes(src, extent); - *last_extent = key.offset + len; - } - } -fill_holes: - /* So we did prev_leaf, now we need to move to the next leaf, but a few - * things could have happened - * - * 1) A merge could have happened, so we could currently be on a leaf - * that holds what we were copying in the first place. - * 2) A split could have happened, and now not all of the items we want - * are on the same leaf. - * - * So we need to adjust how we search for holes, we need to drop the - * path and re-search for the first extent key we found, and then walk - * forward until we hit the last one we copied. - */ - if (need_find_last_extent) { - /* btrfs_prev_leaf could return 1 without releasing the path */ - btrfs_release_path(src_path); - ret = btrfs_search_slot(NULL, inode->root, &first_key, - src_path, 0, 0); - if (ret < 0) - return ret; - ASSERT(ret == 0); - src = src_path->nodes[0]; - i = src_path->slots[0]; - } else { - i = start_slot; - } - - /* - * Ok so here we need to go through and fill in any holes we may have - * to make sure that holes are punched for those areas in case they had - * extents previously. - */ - while (!done) { - u64 offset, len; - u64 extent_end; - - if (i >= btrfs_header_nritems(src_path->nodes[0])) { - ret = btrfs_next_leaf(inode->root, src_path); - if (ret < 0) - return ret; - ASSERT(ret == 0); - src = src_path->nodes[0]; - i = 0; - need_find_last_extent = true; - } - - btrfs_item_key_to_cpu(src, &key, i); - if (!btrfs_comp_cpu_keys(&key, &last_key)) - done = true; - if (key.objectid != btrfs_ino(inode) || - key.type != BTRFS_EXTENT_DATA_KEY) { - i++; - continue; - } - extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(src, extent) == - BTRFS_FILE_EXTENT_INLINE) { - len = btrfs_file_extent_ram_bytes(src, extent); - extent_end = ALIGN(key.offset + len, - fs_info->sectorsize); - } else { - len = btrfs_file_extent_num_bytes(src, extent); - extent_end = key.offset + len; - } - i++; - - if (*last_extent == key.offset) { - *last_extent = extent_end; - continue; - } - offset = *last_extent; - len = key.offset - *last_extent; - ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode), - offset, 0, 0, len, 0, len, 0, 0, 0); - if (ret) - break; - *last_extent = extent_end; - } - - /* - * Check if there is a hole between the last extent found in our leaf - * and the first extent in the next leaf. If there is one, we need to - * log an explicit hole so that at replay time we can punch the hole. 
- */ - if (ret == 0 && - key.objectid == btrfs_ino(inode) && - key.type == BTRFS_EXTENT_DATA_KEY && - i == btrfs_header_nritems(src_path->nodes[0])) { - ret = btrfs_next_leaf(inode->root, src_path); - need_find_last_extent = true; - if (ret > 0) { - ret = 0; - } else if (ret == 0) { - btrfs_item_key_to_cpu(src_path->nodes[0], &key, - src_path->slots[0]); - if (key.objectid == btrfs_ino(inode) && - key.type == BTRFS_EXTENT_DATA_KEY && - *last_extent < key.offset) { - const u64 len = key.offset - *last_extent; - - ret = btrfs_insert_file_extent(trans, log, - btrfs_ino(inode), - *last_extent, 0, - 0, len, 0, len, - 0, 0, 0); - *last_extent += len; - } - } - } - /* - * Need to let the callers know we dropped the path so they should - * re-search. - */ - if (!ret && need_find_last_extent) - ret = 1; return ret; } @@ -4407,7 +4223,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, const u64 i_size = i_size_read(&inode->vfs_inode); const u64 ino = btrfs_ino(inode); struct btrfs_path *dst_path = NULL; - u64 last_extent = (u64)-1; + bool dropped_extents = false; int ins_nr = 0; int start_slot; int ret; @@ -4429,8 +4245,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, if (slot >= btrfs_header_nritems(leaf)) { if (ins_nr > 0) { ret = copy_items(trans, inode, dst_path, path, - &last_extent, start_slot, - ins_nr, 1, 0); + start_slot, ins_nr, 1, 0); if (ret < 0) goto out; ins_nr = 0; @@ -4454,8 +4269,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, path->slots[0]++; continue; } - if (last_extent == (u64)-1) { - last_extent = key.offset; + if (!dropped_extents) { /* * Avoid logging extent items logged in past fsync calls * and leading to duplicate keys in the log tree. @@ -4469,6 +4283,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, } while (ret == -EAGAIN); if (ret) goto out; + dropped_extents = true; } if (ins_nr == 0) start_slot = slot; @@ -4483,7 +4298,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, } } if (ins_nr > 0) { - ret = copy_items(trans, inode, dst_path, path, &last_extent, + ret = copy_items(trans, inode, dst_path, path, start_slot, ins_nr, 1, 0); if (ret > 0) ret = 0; @@ -4670,13 +4485,8 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, if (slot >= nritems) { if (ins_nr > 0) { - u64 last_extent = 0; - ret = copy_items(trans, inode, dst_path, path, - &last_extent, start_slot, - ins_nr, 1, 0); - /* can't be 1, extent items aren't processed */ - ASSERT(ret <= 0); + start_slot, ins_nr, 1, 0); if (ret < 0) return ret; ins_nr = 0; @@ -4700,13 +4510,8 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, cond_resched(); } if (ins_nr > 0) { - u64 last_extent = 0; - ret = copy_items(trans, inode, dst_path, path, - &last_extent, start_slot, - ins_nr, 1, 0); - /* can't be 1, extent items aren't processed */ - ASSERT(ret <= 0); + start_slot, ins_nr, 1, 0); if (ret < 0) return ret; } @@ -4715,100 +4520,119 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, } /* - * If the no holes feature is enabled we need to make sure any hole between the - * last extent and the i_size of our inode is explicitly marked in the log. 
This - * is to make sure that doing something like: - * - * 1) create file with 128Kb of data - * 2) truncate file to 64Kb - * 3) truncate file to 256Kb - * 4) fsync file - * 5) - * 6) mount fs and trigger log replay - * - * Will give us a file with a size of 256Kb, the first 64Kb of data match what - * the file had in its first 64Kb of data at step 1 and the last 192Kb of the - * file correspond to a hole. The presence of explicit holes in a log tree is - * what guarantees that log replay will remove/adjust file extent items in the - * fs/subvol tree. - * - * Here we do not need to care about holes between extents, that is already done - * by copy_items(). We also only need to do this in the full sync path, where we - * lookup for extents from the fs/subvol tree only. In the fast path case, we - * lookup the list of modified extent maps and if any represents a hole, we - * insert a corresponding extent representing a hole in the log tree. + * When using the NO_HOLES feature if we punched a hole that causes the + * deletion of entire leafs or all the extent items of the first leaf (the one + * that contains the inode item and references) we may end up not processing + * any extents, because there are no leafs with a generation matching the + * current transaction that have extent items for our inode. So we need to find + * if any holes exist and then log them. We also need to log holes after any + * truncate operation that changes the inode's size. */ -static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_inode *inode, - struct btrfs_path *path) +static int btrfs_log_holes(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_inode *inode, + struct btrfs_path *path) { struct btrfs_fs_info *fs_info = root->fs_info; - int ret; struct btrfs_key key; - u64 hole_start; - u64 hole_size; - struct extent_buffer *leaf; - struct btrfs_root *log = root->log_root; const u64 ino = btrfs_ino(inode); const u64 i_size = i_size_read(&inode->vfs_inode); + u64 prev_extent_end = 0; + int ret; - if (!btrfs_fs_incompat(fs_info, NO_HOLES)) + if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0) return 0; key.objectid = ino; key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = (u64)-1; + key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - ASSERT(ret != 0); if (ret < 0) return ret; - ASSERT(path->slots[0] > 0); - path->slots[0]--; - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - - if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) { - /* inode does not have any extents */ - hole_start = 0; - hole_size = i_size; - } else { + while (true) { struct btrfs_file_extent_item *extent; + struct extent_buffer *leaf = path->nodes[0]; u64 len; - /* - * If there's an extent beyond i_size, an explicit hole was - * already inserted by copy_items(). - */ - if (key.offset >= i_size) - return 0; + if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + return ret; + if (ret > 0) { + ret = 0; + break; + } + leaf = path->nodes[0]; + } + + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); + if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) + break; + + /* We have a hole, log it. */ + if (prev_extent_end < key.offset) { + const u64 hole_len = key.offset - prev_extent_end; + + /* + * Release the path to avoid deadlocks with other code + * paths that search the root while holding locks on + * leafs from the log root. 
+ */ + btrfs_release_path(path); + ret = btrfs_insert_file_extent(trans, root->log_root, + ino, prev_extent_end, 0, + 0, hole_len, 0, hole_len, + 0, 0, 0); + if (ret < 0) + return ret; + + /* + * Search for the same key again in the root. Since it's + * an extent item and we are holding the inode lock, the + * key must still exist. If it doesn't just emit warning + * and return an error to fall back to a transaction + * commit. + */ + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + return ret; + if (WARN_ON(ret > 0)) + return -ENOENT; + leaf = path->nodes[0]; + } extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, extent) == - BTRFS_FILE_EXTENT_INLINE) - return 0; + BTRFS_FILE_EXTENT_INLINE) { + len = btrfs_file_extent_ram_bytes(leaf, extent); + prev_extent_end = ALIGN(key.offset + len, + fs_info->sectorsize); + } else { + len = btrfs_file_extent_num_bytes(leaf, extent); + prev_extent_end = key.offset + len; + } - len = btrfs_file_extent_num_bytes(leaf, extent); - /* Last extent goes beyond i_size, no need to log a hole. */ - if (key.offset + len > i_size) - return 0; - hole_start = key.offset + len; - hole_size = i_size - hole_start; + path->slots[0]++; + cond_resched(); } - btrfs_release_path(path); - /* Last extent ends at i_size. */ - if (hole_size == 0) - return 0; + if (prev_extent_end < i_size) { + u64 hole_len; - hole_size = ALIGN(hole_size, fs_info->sectorsize); - ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0, - hole_size, 0, hole_size, 0, 0, 0); - return ret; + btrfs_release_path(path); + hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize); + ret = btrfs_insert_file_extent(trans, root->log_root, + ino, prev_extent_end, 0, 0, + hole_len, 0, hole_len, + 0, 0, 0); + if (ret < 0) + return ret; + } + + return 0; } /* @@ -5110,7 +4934,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, struct btrfs_key min_key; struct btrfs_key max_key; struct btrfs_root *log = root->log_root; - u64 last_extent = 0; int err = 0; int ret; int nritems; @@ -5288,7 +5111,7 @@ again: ins_start_slot = path->slots[0]; } ret = copy_items(trans, inode, dst_path, path, - &last_extent, ins_start_slot, + ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) { @@ -5311,17 +5134,13 @@ again: if (ins_nr == 0) goto next_slot; ret = copy_items(trans, inode, dst_path, path, - &last_extent, ins_start_slot, + ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) { err = ret; goto out_unlock; } ins_nr = 0; - if (ret) { - btrfs_release_path(path); - continue; - } goto next_slot; } @@ -5334,18 +5153,13 @@ again: goto next_slot; } - ret = copy_items(trans, inode, dst_path, path, &last_extent, + ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) { err = ret; goto out_unlock; } - if (ret) { - ins_nr = 0; - btrfs_release_path(path); - continue; - } ins_nr = 1; ins_start_slot = path->slots[0]; next_slot: @@ -5359,13 +5173,12 @@ next_slot: } if (ins_nr) { ret = copy_items(trans, inode, dst_path, path, - &last_extent, ins_start_slot, + ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) { err = ret; goto out_unlock; } - ret = 0; ins_nr = 0; } btrfs_release_path(path); @@ -5380,14 +5193,13 @@ next_key: } } if (ins_nr) { - ret = copy_items(trans, inode, dst_path, path, &last_extent, + ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) { err = ret; goto 
out_unlock; } - ret = 0; ins_nr = 0; } @@ -5400,7 +5212,7 @@ next_key: if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { btrfs_release_path(path); btrfs_release_path(dst_path); - err = btrfs_log_trailing_hole(trans, root, inode, path); + err = btrfs_log_holes(trans, root, inode, path); if (err) goto out_unlock; } -- cgit v1.2.3-59-g8ed1b From e4f6c6be814850249a745ea43d5c49800e7178d4 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 14 May 2019 01:59:54 +0200 Subject: btrfs: use raid_attr table in calc_stripe_length for nparity The table is already used for ncopies, replace open coding of stripes with the raid_attr value. Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 9b78e720c697..687eacbef9d6 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6480,19 +6480,14 @@ static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes) { int index = btrfs_bg_flags_to_raid_index(type); int ncopies = btrfs_raid_array[index].ncopies; + const int nparity = btrfs_raid_array[index].nparity; int data_stripes; - switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { - case BTRFS_BLOCK_GROUP_RAID5: - data_stripes = num_stripes - 1; - break; - case BTRFS_BLOCK_GROUP_RAID6: - data_stripes = num_stripes - 2; - break; - default: + if (nparity) + data_stripes = num_stripes - nparity; + else data_stripes = num_stripes / ncopies; - break; - } + return div_u64(chunk_len, data_stripes); } -- cgit v1.2.3-59-g8ed1b From db26a024497c004fa1bdf26263d90de2bfb0edba Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 25 Nov 2019 15:34:48 +0100 Subject: btrfs: fill ncopies for all raid table entries Make the number of copies explicit even for entries that use the default 0 value for consistency. Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 687eacbef9d6..b1bcd245ec66 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -66,6 +66,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { .tolerated_failures = 2, .devs_increment = 3, .ncopies = 3, + .nparity = 0, .raid_name = "raid1c3", .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3, .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, @@ -78,6 +79,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { .tolerated_failures = 3, .devs_increment = 4, .ncopies = 4, + .nparity = 0, .raid_name = "raid1c4", .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4, .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET, -- cgit v1.2.3-59-g8ed1b From b5501504cb6a70d55c3dcf1b2ceb15386e615237 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Thu, 21 Nov 2019 17:33:30 +0800 Subject: btrfs: sysfs, rename devices kobject holder to devices_kobj The struct member btrfs_device::device_dir_kobj holds the kobj of the sysfs directory /sys/fs/btrfs/UUID/devices, so rename it from device_dir_kobj to devices_kobj. No functional changes. 
Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 26 ++++++++++++-------------- fs/btrfs/volumes.h | 2 +- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 5ebbe8a5ee76..3799af42ee29 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -734,10 +734,10 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add) static void __btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs) { - if (fs_devs->device_dir_kobj) { - kobject_del(fs_devs->device_dir_kobj); - kobject_put(fs_devs->device_dir_kobj); - fs_devs->device_dir_kobj = NULL; + if (fs_devs->devices_kobj) { + kobject_del(fs_devs->devices_kobj); + kobject_put(fs_devs->devices_kobj); + fs_devs->devices_kobj = NULL; } if (fs_devs->fsid_kobj.state_initialized) { @@ -969,15 +969,14 @@ int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, struct hd_struct *disk; struct kobject *disk_kobj; - if (!fs_devices->device_dir_kobj) + if (!fs_devices->devices_kobj) return -EINVAL; if (one_device && one_device->bdev) { disk = one_device->bdev->bd_part; disk_kobj = &part_to_dev(disk)->kobj; - sysfs_remove_link(fs_devices->device_dir_kobj, - disk_kobj->name); + sysfs_remove_link(fs_devices->devices_kobj, disk_kobj->name); } if (one_device) @@ -990,8 +989,7 @@ int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, disk = one_device->bdev->bd_part; disk_kobj = &part_to_dev(disk)->kobj; - sysfs_remove_link(fs_devices->device_dir_kobj, - disk_kobj->name); + sysfs_remove_link(fs_devices->devices_kobj, disk_kobj->name); } return 0; @@ -999,11 +997,11 @@ int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs) { - if (!fs_devs->device_dir_kobj) - fs_devs->device_dir_kobj = kobject_create_and_add("devices", - &fs_devs->fsid_kobj); + if (!fs_devs->devices_kobj) + fs_devs->devices_kobj = kobject_create_and_add("devices", + &fs_devs->fsid_kobj); - if (!fs_devs->device_dir_kobj) + if (!fs_devs->devices_kobj) return -ENOMEM; return 0; @@ -1028,7 +1026,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, disk = dev->bdev->bd_part; disk_kobj = &part_to_dev(disk)->kobj; - error = sysfs_create_link(fs_devices->device_dir_kobj, + error = sysfs_create_link(fs_devices->devices_kobj, disk_kobj, disk_kobj->name); if (error) break; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index fc1b564b9cfe..3c56ef571b00 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -255,7 +255,7 @@ struct btrfs_fs_devices { struct btrfs_fs_info *fs_info; /* sysfs kobjects */ struct kobject fsid_kobj; - struct kobject *device_dir_kobj; + struct kobject *devices_kobj; struct completion kobj_unregister; }; -- cgit v1.2.3-59-g8ed1b From c6761a9ed329dd41fee09dc2926126768e5ca34c Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Thu, 21 Nov 2019 17:33:32 +0800 Subject: btrfs: sysfs, btrfs_sysfs_add_fsid() drop unused argument parent Commit 24bd69cb ("Btrfs: sysfs: add support to add parent for fsid") added parent argument in preparation to show the seed fsid under the sprout fsid as in the patch [1] in the mailing list. [1] Btrfs: sysfs: support seed devices in the sysfs layout But later this idea was superseded by another idea to rename the fsid as in the commit f93c39970b1d ("btrfs: factor out sysfs code for updating sprout fsid"). So we don't need parent argument anymore. 
Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/sysfs.c | 11 ++++++----- fs/btrfs/sysfs.h | 5 ++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e0edfdc9c82b..639104326691 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3082,7 +3082,7 @@ int __cold open_ctree(struct super_block *sb, btrfs_free_extra_devids(fs_devices, 1); - ret = btrfs_sysfs_add_fsid(fs_devices, NULL); + ret = btrfs_sysfs_add_fsid(fs_devices); if (ret) { btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", ret); diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 3799af42ee29..1d8ee57da164 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -1065,18 +1065,19 @@ void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices, static struct kset *btrfs_kset; /* + * Creates: + * /sys/fs/btrfs/UUID + * * Can be called by the device discovery thread. - * And parent can be specified for seed device */ -int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs, - struct kobject *parent) +int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs) { int error; init_completion(&fs_devs->kobj_unregister); fs_devs->fsid_kobj.kset = btrfs_kset; - error = kobject_init_and_add(&fs_devs->fsid_kobj, - &btrfs_ktype, parent, "%pU", fs_devs->fsid); + error = kobject_init_and_add(&fs_devs->fsid_kobj, &btrfs_ktype, NULL, + "%pU", fs_devs->fsid); if (error) { kobject_put(&fs_devs->fsid_kobj); return error; diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index e10c3adfc30f..a0454947f41a 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -18,9 +18,8 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, struct btrfs_device *one_device); int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, struct btrfs_device *one_device); -int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs, - struct kobject *parent); -int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs); +int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs); +int btrfs_sysfs_add_devices_kobj(struct btrfs_fs_devices *fs_devs); void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs); void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices, const u8 *fsid); -- cgit v1.2.3-59-g8ed1b From be2cf92e0a2f64efe0709a2cfe5d9f4852d14b61 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Thu, 21 Nov 2019 17:33:33 +0800 Subject: btrfs: sysfs, rename btrfs_sysfs_add_device() btrfs_sysfs_add_device() creates the directory /sys/fs/btrfs/UUID/devices but its function name is misleading. Rename it to btrfs_sysfs_add_devices_kobj() instead. No functional changes. 
Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/sysfs.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 639104326691..65cfd0fd0f83 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3089,7 +3089,7 @@ int __cold open_ctree(struct super_block *sb, goto fail_block_groups; } - ret = btrfs_sysfs_add_device(fs_devices); + ret = btrfs_sysfs_add_devices_kobj(fs_devices); if (ret) { btrfs_err(fs_info, "failed to init sysfs device interface: %d", ret); diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 1d8ee57da164..c76c38d74972 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -995,7 +995,7 @@ int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, return 0; } -int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs) +int btrfs_sysfs_add_devices_kobj(struct btrfs_fs_devices *fs_devs) { if (!fs_devs->devices_kobj) fs_devs->devices_kobj = kobject_create_and_add("devices", -- cgit v1.2.3-59-g8ed1b From bc036bb33524dcb2525d74c336e9694d5d3c0047 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Thu, 21 Nov 2019 17:33:34 +0800 Subject: btrfs: sysfs, merge btrfs_sysfs_add devices_kobj and fsid Merge btrfs_sysfs_add_fsid() and btrfs_sysfs_add_devices_kobj() functions as these two are small and they are called one after the other. Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 7 ------- fs/btrfs/sysfs.c | 21 +++++++++------------ fs/btrfs/sysfs.h | 1 - 3 files changed, 9 insertions(+), 20 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 65cfd0fd0f83..ab888d89d844 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3089,13 +3089,6 @@ int __cold open_ctree(struct super_block *sb, goto fail_block_groups; } - ret = btrfs_sysfs_add_devices_kobj(fs_devices); - if (ret) { - btrfs_err(fs_info, "failed to init sysfs device interface: %d", - ret); - goto fail_fsdev_sysfs; - } - ret = btrfs_sysfs_add_mounted(fs_info); if (ret) { btrfs_err(fs_info, "failed to init sysfs interface: %d", ret); diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index c76c38d74972..16379f491ca1 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -995,18 +995,6 @@ int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, return 0; } -int btrfs_sysfs_add_devices_kobj(struct btrfs_fs_devices *fs_devs) -{ - if (!fs_devs->devices_kobj) - fs_devs->devices_kobj = kobject_create_and_add("devices", - &fs_devs->fsid_kobj); - - if (!fs_devs->devices_kobj) - return -ENOMEM; - - return 0; -} - int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, struct btrfs_device *one_device) { @@ -1083,6 +1071,15 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs) return error; } + fs_devs->devices_kobj = kobject_create_and_add("devices", + &fs_devs->fsid_kobj); + if (!fs_devs->devices_kobj) { + btrfs_err(fs_devs->fs_info, + "failed to init sysfs device interface"); + kobject_put(&fs_devs->fsid_kobj); + return -ENOMEM; + } + return 0; } diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index a0454947f41a..3d27b39eaf94 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -19,7 +19,6 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, struct btrfs_device *one_device); int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs); -int btrfs_sysfs_add_devices_kobj(struct btrfs_fs_devices 
*fs_devs); void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs); void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices, const u8 *fsid); -- cgit v1.2.3-59-g8ed1b From e3b0edd29737d44137fc7583a9c185abda6e23b8 Mon Sep 17 00:00:00 2001 From: Marcos Paulo de Souza Date: Mon, 25 Nov 2019 21:58:50 -0300 Subject: btrfs: qgroup: remove one-time use variables for quota_root checks Remove some variables that are set only to be checked later, and never used. Reviewed-by: Qu Wenruo Signed-off-by: Marcos Paulo de Souza Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/qgroup.c | 34 ++++++++++------------------------ 1 file changed, 10 insertions(+), 24 deletions(-) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 39fc8c3d3a75..0581ea6c4ae3 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1243,7 +1243,6 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_root *quota_root; struct btrfs_qgroup *parent; struct btrfs_qgroup *member; struct btrfs_qgroup_list *list; @@ -1259,8 +1258,7 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, return -ENOMEM; mutex_lock(&fs_info->qgroup_ioctl_lock); - quota_root = fs_info->quota_root; - if (!quota_root) { + if (!fs_info->quota_root) { ret = -EINVAL; goto out; } @@ -1307,7 +1305,6 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_root *quota_root; struct btrfs_qgroup *parent; struct btrfs_qgroup *member; struct btrfs_qgroup_list *list; @@ -1320,8 +1317,7 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, if (!tmp) return -ENOMEM; - quota_root = fs_info->quota_root; - if (!quota_root) { + if (!fs_info->quota_root) { ret = -EINVAL; goto out; } @@ -1387,11 +1383,11 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) int ret = 0; mutex_lock(&fs_info->qgroup_ioctl_lock); - quota_root = fs_info->quota_root; - if (!quota_root) { + if (!fs_info->quota_root) { ret = -EINVAL; goto out; } + quota_root = fs_info->quota_root; qgroup = find_qgroup_rb(fs_info, qgroupid); if (qgroup) { ret = -EEXIST; @@ -1416,14 +1412,12 @@ out: int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_root *quota_root; struct btrfs_qgroup *qgroup; struct btrfs_qgroup_list *list; int ret = 0; mutex_lock(&fs_info->qgroup_ioctl_lock); - quota_root = fs_info->quota_root; - if (!quota_root) { + if (!fs_info->quota_root) { ret = -EINVAL; goto out; } @@ -1465,7 +1459,6 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid, struct btrfs_qgroup_limit *limit) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_root *quota_root; struct btrfs_qgroup *qgroup; int ret = 0; /* Sometimes we would want to clear the limit on this qgroup. 
@@ -1475,8 +1468,7 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid, const u64 CLEAR_VALUE = -1; mutex_lock(&fs_info->qgroup_ioctl_lock); - quota_root = fs_info->quota_root; - if (!quota_root) { + if (!fs_info->quota_root) { ret = -EINVAL; goto out; } @@ -2582,10 +2574,9 @@ cleanup: int btrfs_run_qgroups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_root *quota_root = fs_info->quota_root; int ret = 0; - if (!quota_root) + if (!fs_info->quota_root) return ret; spin_lock(&fs_info->qgroup_lock); @@ -2879,7 +2870,6 @@ static bool qgroup_check_limits(struct btrfs_fs_info *fs_info, static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, enum btrfs_qgroup_rsv_type type) { - struct btrfs_root *quota_root; struct btrfs_qgroup *qgroup; struct btrfs_fs_info *fs_info = root->fs_info; u64 ref_root = root->root_key.objectid; @@ -2898,8 +2888,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, enforce = false; spin_lock(&fs_info->qgroup_lock); - quota_root = fs_info->quota_root; - if (!quota_root) + if (!fs_info->quota_root) goto out; qgroup = find_qgroup_rb(fs_info, ref_root); @@ -2966,7 +2955,6 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, u64 ref_root, u64 num_bytes, enum btrfs_qgroup_rsv_type type) { - struct btrfs_root *quota_root; struct btrfs_qgroup *qgroup; struct ulist_node *unode; struct ulist_iterator uiter; @@ -2984,8 +2972,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, } spin_lock(&fs_info->qgroup_lock); - quota_root = fs_info->quota_root; - if (!quota_root) + if (!fs_info->quota_root) goto out; qgroup = find_qgroup_rb(fs_info, ref_root); @@ -3685,7 +3672,6 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, int num_bytes) { - struct btrfs_root *quota_root = fs_info->quota_root; struct btrfs_qgroup *qgroup; struct ulist_node *unode; struct ulist_iterator uiter; @@ -3693,7 +3679,7 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, if (num_bytes == 0) return; - if (!quota_root) + if (!fs_info->quota_root) return; spin_lock(&fs_info->qgroup_lock); -- cgit v1.2.3-59-g8ed1b From 8a36e408d40606e21cd4e2dd9601004a67b14868 Mon Sep 17 00:00:00 2001 From: Marcos Paulo de Souza Date: Mon, 25 Nov 2019 21:58:51 -0300 Subject: btrfs: qgroup: return ENOTCONN instead of EINVAL when quotas are not enabled [PROBLEM] qgroup create/remove code is currently returning EINVAL when the user tries to create a qgroup on a subvolume without quota enabled. EINVAL is already being used for too many error scenarios, which makes it hard to tell what the actual problem is. [FIX] Currently scrub and balance code return -ENOTCONN when the user tries to cancel/pause and no scrub or balance is currently running for the desired subvolume. Do the same here by returning -ENOTCONN when a user tries to create/delete/assign/list a qgroup on a subvolume without quota enabled.
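[Editor's note] For user space, the practical effect is that tools can distinguish "quotas are not enabled" from a genuinely invalid argument. The following is a minimal, hypothetical sketch of such a check, assuming the qgroup create ioctl definitions exposed by the uapi header <linux/btrfs.h>; the mount point and qgroup id are illustrative only:

/*
 * Hypothetical sketch: create a qgroup and report "quotas not enabled"
 * when the kernel returns ENOTCONN (older kernels return EINVAL here).
 * Assumes BTRFS_IOC_QGROUP_CREATE and struct btrfs_ioctl_qgroup_create_args
 * as provided by <linux/btrfs.h>.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_qgroup_create_args args;
	int fd = open("/mnt", O_RDONLY);	/* any file/dir in the fs */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&args, 0, sizeof(args));
	args.create = 1;			/* 1 = create, 0 = remove */
	args.qgroupid = (1ULL << 48) | 100;	/* qgroup 1/100, illustrative */

	if (ioctl(fd, BTRFS_IOC_QGROUP_CREATE, &args) < 0) {
		if (errno == ENOTCONN)
			fprintf(stderr, "quotas are not enabled on this filesystem\n");
		else
			fprintf(stderr, "qgroup create failed: %s\n", strerror(errno));
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}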
Reviewed-by: Qu Wenruo Signed-off-by: Marcos Paulo de Souza Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/qgroup.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 0581ea6c4ae3..98d9a50352d6 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1259,7 +1259,7 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) { - ret = -EINVAL; + ret = -ENOTCONN; goto out; } member = find_qgroup_rb(fs_info, src); @@ -1318,7 +1318,7 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, return -ENOMEM; if (!fs_info->quota_root) { - ret = -EINVAL; + ret = -ENOTCONN; goto out; } @@ -1384,7 +1384,7 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) { - ret = -EINVAL; + ret = -ENOTCONN; goto out; } quota_root = fs_info->quota_root; @@ -1418,7 +1418,7 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) { - ret = -EINVAL; + ret = -ENOTCONN; goto out; } @@ -1469,7 +1469,7 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid, mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) { - ret = -EINVAL; + ret = -ENOTCONN; goto out; } -- cgit v1.2.3-59-g8ed1b From 4eaaec24c087a52c6b2ed75856037ae42cc6a830 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 21 Nov 2019 14:03:29 +0200 Subject: btrfs: Don't discard unwritten extents All callers of btrfs_free_reserved_extent (respectively __btrfs_free_reserved_extent with in set to 0) pass in extents which have only been reserved but not yet written to. Namely, * in cow_file_range that function is called only if create_io_em fails or btrfs_add_ordered_extent fail, both of which happen _before_ any IO is submitted to the newly reserved range * in submit_compressed_extents the code flow is similar - out_free_reserve can be called only before btrfs_submit_compressed_write which is where any writes to the range could occur * btrfs_new_extent_direct also calls btrfs_free_reserved_extent only if extent_map fails, before any IO is issued * __btrfs_prealloc_file_range also calls btrfs_free_reserved_extent in case insertion of the metadata fails * btrfs_alloc_tree_block again can only be called in case in-memory operations fail, before any IO is submitted * btrfs_finish_ordered_io - this is the only caller where discarding the extent could have a material effect, since it can be called for an extent which was partially written. With this change the submission of discards is optimised since discards are now not being created for extents which are known to not have been touched on disk. 
Reviewed-by: Filipe Manana Signed-off-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 2 -- fs/btrfs/inode.c | 11 ++++++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 274318e9114e..3495710c72b8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4182,8 +4182,6 @@ static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, if (pin) pin_down_extent(cache, start, len, 1); else { - if (btrfs_test_opt(fs_info, DISCARD)) - ret = btrfs_discard_extent(fs_info, start, len, NULL); btrfs_add_free_space(cache, start, len); btrfs_free_reserved_bytes(cache, len, delalloc); trace_btrfs_reserved_extent_free(fs_info, start, len); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c70baafb2a39..c1cee6a01c35 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3250,10 +3250,19 @@ out: if ((ret || !logical_len) && clear_reserved_extent && !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && - !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) + !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { + /* + * Discard the range before returning it back to the + * free space pool + */ + if (ret && btrfs_test_opt(fs_info, DISCARD)) + btrfs_discard_extent(fs_info, + ordered_extent->start, + ordered_extent->disk_len, NULL); btrfs_free_reserved_extent(fs_info, ordered_extent->start, ordered_extent->disk_len, 1); + } } -- cgit v1.2.3-59-g8ed1b From 7ef54d54bf6aacb5faeb5a7f3db18b7828498099 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 21 Nov 2019 14:03:30 +0200 Subject: btrfs: Open code __btrfs_free_reserved_extent in btrfs_free_reserved_extent __btrfs_free_reserved_extent performs 2 entirely different operations depending on whether its 'pin' argument is true or false. This patch lifts the 2nd case (pin is false) into its sole caller btrfs_free_reserved_extent. No semantics changes.
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 3495710c72b8..215dceaacaea 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4179,14 +4179,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, return -ENOSPC; } - if (pin) - pin_down_extent(cache, start, len, 1); - else { - btrfs_add_free_space(cache, start, len); - btrfs_free_reserved_bytes(cache, len, delalloc); - trace_btrfs_reserved_extent_free(fs_info, start, len); - } - + ret = pin_down_extent(cache, start, len, 1); btrfs_put_block_group(cache); return ret; } @@ -4194,7 +4187,20 @@ static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len, int delalloc) { - return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc); + struct btrfs_block_group *cache; + + cache = btrfs_lookup_block_group(fs_info, start); + if (!cache) { + btrfs_err(fs_info, "unable to find block group for %llu", start); + return -ENOSPC; + } + + btrfs_add_free_space(cache, start, len); + btrfs_free_reserved_bytes(cache, len, delalloc); + trace_btrfs_reserved_extent_free(fs_info, start, len); + + btrfs_put_block_group(cache); + return 0; } int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info, -- cgit v1.2.3-59-g8ed1b From a0fbf736d35efcddbfaacddd88aa4ca61c1668c3 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 21 Nov 2019 14:03:31 +0200 Subject: btrfs: Rename __btrfs_free_reserved_extent to btrfs_pin_reserved_extent __btrfs_free_reserved_extent now performs the actions of btrfs_free_and_pin_reserved_extent. But this name is a bit of a misnomer, since the extent is not really freed but just pinned. Reflect this in the new name. No semantics changes. 
Signed-off-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 4 ++-- fs/btrfs/extent-tree.c | 30 +++++++++++------------------- fs/btrfs/tree-log.c | 12 +++++------- 3 files changed, 18 insertions(+), 28 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 54efb21c2727..ea49e4b52cd2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2449,8 +2449,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref); int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len, int delalloc); -int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info, - u64 start, u64 len); +int btrfs_pin_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, + u64 len); void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info); int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans); int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 215dceaacaea..2a7dff22c3b7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4165,12 +4165,10 @@ again: return ret; } -static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, - u64 start, u64 len, - int pin, int delalloc) +int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, + u64 start, u64 len, int delalloc) { struct btrfs_block_group *cache; - int ret = 0; cache = btrfs_lookup_block_group(fs_info, start); if (!cache) { @@ -4179,15 +4177,18 @@ static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, return -ENOSPC; } - ret = pin_down_extent(cache, start, len, 1); + btrfs_add_free_space(cache, start, len); + btrfs_free_reserved_bytes(cache, len, delalloc); + trace_btrfs_reserved_extent_free(fs_info, start, len); + btrfs_put_block_group(cache); - return ret; + return 0; } -int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, - u64 start, u64 len, int delalloc) +int btrfs_pin_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len) { struct btrfs_block_group *cache; + int ret = 0; cache = btrfs_lookup_block_group(fs_info, start); if (!cache) { @@ -4195,18 +4196,9 @@ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, return -ENOSPC; } - btrfs_add_free_space(cache, start, len); - btrfs_free_reserved_bytes(cache, len, delalloc); - trace_btrfs_reserved_extent_free(fs_info, start, len); - + ret = pin_down_extent(cache, start, len, 1); btrfs_put_block_group(cache); - return 0; -} - -int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info, - u64 start, u64 len) -{ - return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0); + return ret; } static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 44480762003c..47f4c6eea851 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2732,9 +2732,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); - ret = btrfs_free_and_pin_reserved_extent( - fs_info, bytenr, - blocksize); + ret = btrfs_pin_reserved_extent(fs_info, + bytenr, blocksize); if (ret) { free_extent_buffer(next); return ret; @@ -2815,8 +2814,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, } WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); - ret = btrfs_free_and_pin_reserved_extent( - fs_info, + ret = btrfs_pin_reserved_extent(fs_info, path->nodes[*level]->start, path->nodes[*level]->len); if (ret) @@ -2898,8 +2896,8 @@ 
static int walk_log_tree(struct btrfs_trans_handle *trans, WARN_ON(log->root_key.objectid != BTRFS_TREE_LOG_OBJECTID); - ret = btrfs_free_and_pin_reserved_extent(fs_info, - next->start, next->len); + ret = btrfs_pin_reserved_extent(fs_info, next->start, + next->len); if (ret) goto out; } -- cgit v1.2.3-59-g8ed1b From 5c4b691eb8958dfdba9bb77d5ac4897bbaeb9064 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 2 Dec 2019 11:40:13 +0200 Subject: btrfs: Remove WARN_ON in walk_log_tree The log_root passed to walk_log_tree is guaranteed to have its root_key.objectid always be BTRFS_TREE_LOG_OBJECTID. This is because all log roots of an ordinary root are allocated in alloc_log_tree, which hard-codes the objectid to be BTRFS_TREE_LOG_OBJECTID. In case walk_log_tree is called for a log tree found by btrfs_read_fs_root in btrfs_recover_log_trees, that function already ensures found_key.objectid is BTRFS_TREE_LOG_OBJECTID. No functional changes. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 47f4c6eea851..9a6b1303fcab 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2894,8 +2894,6 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, clear_extent_buffer_dirty(next); } - WARN_ON(log->root_key.objectid != - BTRFS_TREE_LOG_OBJECTID); ret = btrfs_pin_reserved_extent(fs_info, next->start, next->len); if (ret) goto out; -- cgit v1.2.3-59-g8ed1b From 36ee0b44ad238c7797e1f7cbefdf75c6cf887101 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 2 Dec 2019 11:40:14 +0200 Subject: btrfs: Remove redundant WARN_ON in walk_down_log_tree The checks for level < 0 and level >= BTRFS_MAX_LEVEL are already performed upon extent buffer read by the tree checker in btrfs_check_node, so they can go. As far as 'level <= 0' goes, we are guaranteed that level is '> 0' because the value of level _before_ reading 'next' is larger than 1 (otherwise we wouldn't have executed that code at all); this in turn guarantees that 'level' after btrfs_read_buffer is 'level - 1', since we verify this invariant in: btrfs_read_buffer btree_read_extent_buffer_pages btrfs_verify_level_key This guarantees that level can never be '<= 0', so the WARN_ON is never triggered.
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9a6b1303fcab..a2bae5c230e1 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2674,14 +2674,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, u32 blocksize; int ret = 0; - WARN_ON(*level < 0); - WARN_ON(*level >= BTRFS_MAX_LEVEL); - while (*level > 0) { struct btrfs_key first_key; - WARN_ON(*level < 0); - WARN_ON(*level >= BTRFS_MAX_LEVEL); cur = path->nodes[*level]; WARN_ON(btrfs_header_level(cur) != *level); @@ -2748,7 +2743,6 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, return ret; } - WARN_ON(*level <= 0); if (path->nodes[*level-1]) free_extent_buffer(path->nodes[*level-1]); path->nodes[*level-1] = next; @@ -2756,9 +2750,6 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, path->slots[*level] = 0; cond_resched(); } - WARN_ON(*level < 0); - WARN_ON(*level >= BTRFS_MAX_LEVEL); - path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); cond_resched(); -- cgit v1.2.3-59-g8ed1b From 76de60ed044f26f0ab62c2f752cb3d0eade2cde0 Mon Sep 17 00:00:00 2001 From: Yunfeng Ye Date: Tue, 3 Dec 2019 16:59:25 +0800 Subject: btrfs: remove unused condition check in btrfs_page_mkwrite() The condition '!ret2' is always true. commit 717beb96d969 ("Btrfs: fix regression in btrfs_page_mkwrite() from vm_fault_t conversion") left behind the check after moving this code out of the goto, so remove the unused condition check. Reviewed-by: Omar Sandoval Signed-off-by: Yunfeng Ye Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c1cee6a01c35..73c148eaae9f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -9099,7 +9099,6 @@ again: ret = VM_FAULT_SIGBUS; goto out_unlock; } - ret2 = 0; /* page is wholly or partially inside EOF */ if (page_start + PAGE_SIZE > size) @@ -9123,12 +9122,10 @@ again: unlock_extent_cached(io_tree, page_start, page_end, &cached_state); - if (!ret2) { - btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); - sb_end_pagefault(inode->i_sb); - extent_changeset_free(data_reserved); - return VM_FAULT_LOCKED; - } + btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); + sb_end_pagefault(inode->i_sb); + extent_changeset_free(data_reserved); + return VM_FAULT_LOCKED; out_unlock: unlock_page(page); -- cgit v1.2.3-59-g8ed1b From 430640e31649beeb84e2c66f77ab68d7423b66bd Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 29 Nov 2019 12:40:59 +0800 Subject: btrfs: relocation: Output current relocation stage at btrfs_relocate_block_group() There are two relocation stages but both print the same message. Add the description of the stage. This can help debugging and provides a more informative message to users.
BTRFS info (device dm-5): balance: start -d -m -s BTRFS info (device dm-5): relocating block group 30408704 flags metadata|dup BTRFS info (device dm-5): found 2 extents, stage: move data extents BTRFS info (device dm-5): relocating block group 22020096 flags system|dup BTRFS info (device dm-5): found 1 extents, stage: move data extents BTRFS info (device dm-5): relocating block group 13631488 flags data BTRFS info (device dm-5): found 1 extents, stage: move data extents BTRFS info (device dm-5): found 1 extents, stage: update data pointers BTRFS info (device dm-5): balance: ended with status: 0 Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/relocation.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index da5abd62db22..1875e84b9bf7 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4332,6 +4332,15 @@ static void describe_relocation(struct btrfs_fs_info *fs_info, block_group->start, buf); } +static const char *stage_to_string(int stage) +{ + if (stage == MOVE_DATA_EXTENTS) + return "move data extents"; + if (stage == UPDATE_DATA_PTRS) + return "update data pointers"; + return "unknown"; +} + /* * function to relocate all extents in a block group. */ @@ -4406,12 +4415,15 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) rc->block_group->length); while (1) { + int finishes_stage; + mutex_lock(&fs_info->cleaner_mutex); ret = relocate_block_group(rc); mutex_unlock(&fs_info->cleaner_mutex); if (ret < 0) err = ret; + finishes_stage = rc->stage; /* * We may have gotten ENOSPC after we already dirtied some * extents. If writeout happens while we're relocating a @@ -4437,8 +4449,8 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) if (rc->extents_found == 0) break; - btrfs_info(fs_info, "found %llu extents", rc->extents_found); - + btrfs_info(fs_info, "found %llu extents, stage: %s", + rc->extents_found, stage_to_string(finishes_stage)); } WARN_ON(rc->block_group->pinned > 0); -- cgit v1.2.3-59-g8ed1b From 511a32b5493f0d0d89b1a07bdbb82ac777993575 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 29 Nov 2019 11:38:13 +0200 Subject: btrfs: Opencode ordered_data_tree_panic It's a simple wrapper over btrfs_panic and is called only once. Just open code it. 
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ordered-data.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index fb09bc2f8e4d..ed29c30802ab 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -52,14 +52,6 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, return NULL; } -static void ordered_data_tree_panic(struct inode *inode, int errno, - u64 offset) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - btrfs_panic(fs_info, errno, - "Inconsistency in ordered tree at offset %llu", offset); -} - /* * look for a given offset in the tree, and if it can't be found return the * first lesser offset @@ -219,7 +211,9 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, node = tree_insert(&tree->tree, file_offset, &entry->rb_node); if (node) - ordered_data_tree_panic(inode, -EEXIST, file_offset); + btrfs_panic(fs_info, -EEXIST, + "inconsistency in ordered tree at offset %llu", + file_offset); spin_unlock_irq(&tree->lock); spin_lock(&root->ordered_extent_lock); -- cgit v1.2.3-59-g8ed1b From 9f246926b4d5db4c5e8c78e4897757de26c95be6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 26 Nov 2019 11:25:53 -0500 Subject: btrfs: don't pass system_chunk into can_overcommit We have the space_info, we can just check its flags to see if it's the system chunk space info. Reviewed-by: Nikolay Borisov Reviewed-by: Qu Wenruo Reviewed-by: Johannes Thumshirn Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 42 +++++++++++++++--------------------------- 1 file changed, 15 insertions(+), 27 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index f09aa6ee9113..537bc310a673 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -161,8 +161,7 @@ static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global) static int can_overcommit(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 bytes, - enum btrfs_reserve_flush_enum flush, - bool system_chunk) + enum btrfs_reserve_flush_enum flush) { u64 profile; u64 avail; @@ -173,7 +172,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info, if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) return 0; - if (system_chunk) + if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) profile = btrfs_system_alloc_profile(fs_info); else profile = btrfs_metadata_alloc_profile(fs_info); @@ -227,8 +226,7 @@ again: /* Check and see if our ticket can be satisified now. 
*/ if ((used + ticket->bytes <= space_info->total_bytes) || - can_overcommit(fs_info, space_info, ticket->bytes, flush, - false)) { + can_overcommit(fs_info, space_info, ticket->bytes, flush)) { btrfs_space_info_update_bytes_may_use(fs_info, space_info, ticket->bytes); @@ -626,8 +624,7 @@ static void flush_space(struct btrfs_fs_info *fs_info, static inline u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - bool system_chunk) + struct btrfs_space_info *space_info) { struct reserve_ticket *ticket; u64 used; @@ -643,13 +640,12 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M); if (can_overcommit(fs_info, space_info, to_reclaim, - BTRFS_RESERVE_FLUSH_ALL, system_chunk)) + BTRFS_RESERVE_FLUSH_ALL)) return 0; used = btrfs_space_info_used(space_info, true); - if (can_overcommit(fs_info, space_info, SZ_1M, - BTRFS_RESERVE_FLUSH_ALL, system_chunk)) + if (can_overcommit(fs_info, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL)) expected = div_factor_fine(space_info->total_bytes, 95); else expected = div_factor_fine(space_info->total_bytes, 90); @@ -665,7 +661,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, - u64 used, bool system_chunk) + u64 used) { u64 thresh = div_factor_fine(space_info->total_bytes, 98); @@ -673,8 +669,7 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh) return 0; - if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info, - system_chunk)) + if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info)) return 0; return (used >= thresh && !btrfs_fs_closing(fs_info) && @@ -765,8 +760,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work) space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); spin_lock(&space_info->lock); - to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info, - false); + to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); if (!to_reclaim) { space_info->flush = 0; spin_unlock(&space_info->lock); @@ -785,8 +779,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work) return; } to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, - space_info, - false); + space_info); if (last_tickets_id == space_info->tickets_id) { flush_state++; } else { @@ -858,8 +851,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, int flush_state; spin_lock(&space_info->lock); - to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info, - false); + to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); if (!to_reclaim) { spin_unlock(&space_info->lock); return; @@ -990,8 +982,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 orig_bytes, - enum btrfs_reserve_flush_enum flush, - bool system_chunk) + enum btrfs_reserve_flush_enum flush) { struct reserve_ticket ticket; u64 used; @@ -1013,8 +1004,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, */ if (!pending_tickets && ((used + orig_bytes <= space_info->total_bytes) || - can_overcommit(fs_info, space_info, orig_bytes, flush, - system_chunk))) { + can_overcommit(fs_info, space_info, orig_bytes, flush))) { 
btrfs_space_info_update_bytes_may_use(fs_info, space_info, orig_bytes); ret = 0; @@ -1054,8 +1044,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, * the async reclaim as we will panic. */ if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && - need_do_async_reclaim(fs_info, space_info, - used, system_chunk) && + need_do_async_reclaim(fs_info, space_info, used) && !work_busy(&fs_info->async_reclaim_work)) { trace_btrfs_trigger_flush(fs_info, space_info->flags, orig_bytes, flush, "preempt"); @@ -1092,10 +1081,9 @@ int btrfs_reserve_metadata_bytes(struct btrfs_root *root, struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; int ret; - bool system_chunk = (root == fs_info->chunk_root); ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info, - orig_bytes, flush, system_chunk); + orig_bytes, flush); if (ret == -ENOSPC && unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) { if (block_rsv != global_rsv && -- cgit v1.2.3-59-g8ed1b From f8935566372c579cf9076d344a76e7781365c6e3 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 26 Nov 2019 11:25:54 -0500 Subject: btrfs: kill min_allocable_bytes in inc_block_group_ro This is a relic from a time before we had a proper reservation mechanism and you could end up with really full chunks at chunk allocation time. This doesn't make sense anymore, so just kill it. Reviewed-by: Qu Wenruo Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 6934a5b8708f..66fa39632cde 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1185,21 +1185,8 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force) struct btrfs_space_info *sinfo = cache->space_info; u64 num_bytes; u64 sinfo_used; - u64 min_allocable_bytes; int ret = -ENOSPC; - /* - * We need some metadata space and system metadata space for - * allocating chunks in some corner cases until we force to set - * it to be readonly. - */ - if ((sinfo->flags & - (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) && - !force) - min_allocable_bytes = SZ_1M; - else - min_allocable_bytes = 0; - spin_lock(&sinfo->lock); spin_lock(&cache->lock); @@ -1217,10 +1204,9 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force) * sinfo_used + num_bytes should always <= sinfo->total_bytes. * * Here we make sure if we mark this bg RO, we still have enough - * free space as buffer (if min_allocable_bytes is not 0). + * free space as buffer. 
*/ - if (sinfo_used + num_bytes + min_allocable_bytes <= - sinfo->total_bytes) { + if (sinfo_used + num_bytes <= sinfo->total_bytes) { sinfo->bytes_readonly += num_bytes; cache->ro++; list_add_tail(&cache->ro_list, &sinfo->ro_bgs); @@ -1233,8 +1219,8 @@ out: btrfs_info(cache->fs_info, "unable to make block group %llu ro", cache->start); btrfs_info(cache->fs_info, - "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu", - sinfo_used, num_bytes, min_allocable_bytes); + "sinfo_used=%llu bg_num_bytes=%llu", + sinfo_used, num_bytes); btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); } return ret; -- cgit v1.2.3-59-g8ed1b From 3dbd351df42109902fbcebf27104149226a4fcd9 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Thu, 5 Dec 2019 14:19:57 +0100 Subject: btrfs: fix possible NULL-pointer dereference in integrity checks A user reports a possible NULL-pointer dereference in btrfsic_process_superblock(). We are assigning state->fs_info to a local fs_info variable and afterwards checking for the presence of state. While we would BUG_ON() a NULL state anyways, we can also just remove the local fs_info copy, as fs_info is only used once as the first argument for btrfs_num_copies(). There we can just pass in state->fs_info as well. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=205003 Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/check-integrity.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 0b52ab4cb964..72c70f59fc60 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -629,7 +629,6 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev, static int btrfsic_process_superblock(struct btrfsic_state *state, struct btrfs_fs_devices *fs_devices) { - struct btrfs_fs_info *fs_info = state->fs_info; struct btrfs_super_block *selected_super; struct list_head *dev_head = &fs_devices->devices; struct btrfs_device *device; @@ -700,7 +699,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state, break; } - num_copies = btrfs_num_copies(fs_info, next_bytenr, + num_copies = btrfs_num_copies(state->fs_info, next_bytenr, state->metablock_size); if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) pr_info("num_copies(log_bytenr=%llu) = %d\n", -- cgit v1.2.3-59-g8ed1b From 129699522581f465e0300a286ed28a0aab0893d3 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Thu, 5 Dec 2019 14:19:58 +0100 Subject: btrfs: remove superfluous BUG_ON() in integrity checks btrfsic_process_superblock() BUG_ON()s if 'state' is NULL. But this can never happen as the only caller from btrfsic_process_superblock() is btrfsic_mount() which allocates 'state' some lines above calling btrfsic_process_superblock() and checks for the allocation to succeed. Let's just remove the impossible to hit BUG_ON(). 
Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/check-integrity.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 72c70f59fc60..a0ce69f2d27c 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -636,7 +636,6 @@ static int btrfsic_process_superblock(struct btrfsic_state *state, int ret = 0; int pass; - BUG_ON(NULL == state); selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS); if (NULL == selected_super) { pr_info("btrfsic: error, kmalloc failed!\n"); -- cgit v1.2.3-59-g8ed1b From b38f4cbd65fb56180f8d283f410ddec3965c93bf Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Thu, 5 Dec 2019 14:19:59 +0100 Subject: btrfs: remove impossible WARN_ON in btrfs_destroy_dev_replace_tgtdev() We have a user report that cppcheck is complaining about a possible NULL-pointer dereference in btrfs_destroy_dev_replace_tgtdev(). We first dereference the 'tgtdev' variable and only later check the validity of the pointer with a WARN_ON(!tgtdev). But all callers of btrfs_destroy_dev_replace_tgtdev() either explicitly check if 'tgtdev' is non-NULL or directly allocate 'tgtdev', so the WARN_ON() is impossible to hit. Just remove it to silence the checker's complaints. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=205003 Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b1bcd245ec66..07ef3d924aae 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2132,7 +2132,6 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) { struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; - WARN_ON(!tgtdev); mutex_lock(&fs_devices->device_list_mutex); btrfs_sysfs_rm_device_link(fs_devices, tgtdev); -- cgit v1.2.3-59-g8ed1b From 6bb6b51447ca114980c2debab8241ae21f8557c7 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Thu, 5 Dec 2019 10:36:04 -0800 Subject: btrfs: use simple_dir_inode_operations for placeholder subvolume directory When you snapshot a subvolume containing a subvolume, you get a placeholder directory where the subvolume would be. These directories have their own btrfs_dir_ro_inode_operations. Al pointed out [1] that these directories can use simple_lookup() instead of btrfs_lookup(), as they are always empty. Furthermore, they can use the default generic_permission() instead of btrfs_permission(); the additional checks in the latter don't matter because we can't write to the directory anyway. Finally, they can use the default generic_update_time() instead of btrfs_update_time(), as the inode doesn't exist on disk and doesn't need any special handling. Altogether, this means that we can get rid of btrfs_dir_ro_inode_operations and use simple_dir_inode_operations instead.
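A minimal sketch of why the generic helpers are enough here, assuming only core VFS behaviour (make_placeholder_dir() below is illustrative and not btrfs code): simple_dir_inode_operations only wires up .lookup = simple_lookup(), and leaving .permission and .update_time unset makes the VFS fall back to generic_permission() and generic_update_time(), which is all an always-empty, read-only, in-memory directory needs.

  #include <linux/fs.h>

  /* Illustrative only: an always-empty, read-only, in-memory directory
   * can rely entirely on the VFS generic helpers. */
  static struct inode *make_placeholder_dir(struct super_block *sb)
  {
  	struct inode *inode = new_inode(sb);

  	if (!inode)
  		return NULL;
  	/* .lookup = simple_lookup(); .permission and .update_time stay NULL,
  	 * so the VFS uses generic_permission() and generic_update_time(). */
  	inode->i_op = &simple_dir_inode_operations;
  	inode->i_fop = &simple_dir_operations;
  	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
  	return inode;
  }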
1: https://lore.kernel.org/linux-btrfs/20190929052934.GY26530@ZenIV.linux.org.uk/ Cc: Al Viro Reviewed-by: Josef Bacik Signed-off-by: Omar Sandoval Reviewed-by: David Sterba [ add comment ] Signed-off-by: David Sterba --- fs/btrfs/inode.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 73c148eaae9f..4b2d151b6d7b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -64,7 +64,6 @@ struct btrfs_dio_data { static const struct inode_operations btrfs_dir_inode_operations; static const struct inode_operations btrfs_symlink_inode_operations; -static const struct inode_operations btrfs_dir_ro_inode_operations; static const struct inode_operations btrfs_special_inode_operations; static const struct inode_operations btrfs_file_inode_operations; static const struct address_space_operations btrfs_aops; @@ -5869,7 +5868,11 @@ static struct inode *new_simple_dir(struct super_block *s, set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; - inode->i_op = &btrfs_dir_ro_inode_operations; + /* + * We only need lookup, the rest is read-only and there's no inode + * associated with the dentry + */ + inode->i_op = &simple_dir_inode_operations; inode->i_opflags &= ~IOP_XATTR; inode->i_fop = &simple_dir_operations; inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; @@ -11010,11 +11013,6 @@ static const struct inode_operations btrfs_dir_inode_operations = { .update_time = btrfs_update_time, .tmpfile = btrfs_tmpfile, }; -static const struct inode_operations btrfs_dir_ro_inode_operations = { - .lookup = btrfs_lookup, - .permission = btrfs_permission, - .update_time = btrfs_update_time, -}; static const struct file_operations btrfs_dir_file_operations = { .llseek = generic_file_llseek, -- cgit v1.2.3-59-g8ed1b From 3fff3975a7155c49b85736786c6bcb1dd9dcec18 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Tue, 26 Nov 2019 09:40:05 +0100 Subject: btrfs: decrement number of open devices after closing the device not before In btrfs_close_one_device we're decrementing the number of open devices before we're calling btrfs_close_bdev(). As there is no intermediate exit between these points in this function it is technically OK to do so, but it makes the code a bit harder to understand. Move both operations closer together and move the decrement step after btrfs_close_bdev(). 
Reviewed-by: Qu Wenruo Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 07ef3d924aae..ac7b483754c4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1069,9 +1069,6 @@ static void btrfs_close_one_device(struct btrfs_device *device) struct btrfs_device *new_device; struct rcu_string *name; - if (device->bdev) - fs_devices->open_devices--; - if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && device->devid != BTRFS_DEV_REPLACE_DEVID) { list_del_init(&device->dev_alloc_list); @@ -1082,6 +1079,8 @@ static void btrfs_close_one_device(struct btrfs_device *device) fs_devices->missing_devices--; btrfs_close_bdev(device); + if (device->bdev) + fs_devices->open_devices--; new_device = btrfs_alloc_device(NULL, &device->devid, device->uuid); -- cgit v1.2.3-59-g8ed1b From 321f69f86a0fc40203b43659c3a39464f15c2ee9 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Wed, 4 Dec 2019 14:36:39 +0100 Subject: btrfs: reset device back to allocation state when removing When closing a device, btrfs_close_one_device() first allocates a new device, copies the device to close's name, replaces it in the dev_list with the copy and then finally frees it. This involves two memory allocation, which can potentially fail. As this code path is tricky to unwind, the allocation failures where handled by BUG_ON()s. But this copying isn't strictly needed, all that is needed is resetting the device in question to it's state it had after the allocation. Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ac7b483754c4..86990b7a60ed 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1066,8 +1066,6 @@ static void btrfs_close_bdev(struct btrfs_device *device) static void btrfs_close_one_device(struct btrfs_device *device) { struct btrfs_fs_devices *fs_devices = device->fs_devices; - struct btrfs_device *new_device; - struct rcu_string *name; if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && device->devid != BTRFS_DEV_REPLACE_DEVID) { @@ -1079,25 +1077,22 @@ static void btrfs_close_one_device(struct btrfs_device *device) fs_devices->missing_devices--; btrfs_close_bdev(device); - if (device->bdev) + if (device->bdev) { fs_devices->open_devices--; - - new_device = btrfs_alloc_device(NULL, &device->devid, - device->uuid); - BUG_ON(IS_ERR(new_device)); /* -ENOMEM */ - - /* Safe because we are under uuid_mutex */ - if (device->name) { - name = rcu_string_strdup(device->name->str, GFP_NOFS); - BUG_ON(!name); /* -ENOMEM */ - rcu_assign_pointer(new_device->name, name); + device->bdev = NULL; } + clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); - list_replace_rcu(&device->dev_list, &new_device->dev_list); - new_device->fs_devices = device->fs_devices; + device->fs_info = NULL; + atomic_set(&device->dev_stats_ccnt, 0); + extent_io_tree_release(&device->alloc_state); - synchronize_rcu(); - btrfs_free_device(device); + /* Verify the device is back in a pristine state */ + ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)); + ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); + ASSERT(list_empty(&device->dev_alloc_list)); + ASSERT(list_empty(&device->post_commit_list)); + 
ASSERT(atomic_read(&device->reada_in_flight) == 0); } static int close_fs_devices(struct btrfs_fs_devices *fs_devices) -- cgit v1.2.3-59-g8ed1b From e62958fce94b30ef6aab99bb1bc49fde7fac73ea Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 2 Dec 2019 17:34:17 -0800 Subject: btrfs: get rid of trivial __btrfs_lookup_bio_sums() wrappers Currently, we have two wrappers for __btrfs_lookup_bio_sums(): btrfs_lookup_bio_sums_dio(), which is used for direct I/O, and btrfs_lookup_bio_sums(), which is used everywhere else. The only difference is that the _dio variant looks up csums starting at the given offset instead of using the page index, which isn't actually direct I/O-specific. Let's clean up the signature and return value of __btrfs_lookup_bio_sums(), rename it to btrfs_lookup_bio_sums(), and get rid of the trivial helpers. Reviewed-by: Nikolay Borisov Signed-off-by: Omar Sandoval Signed-off-by: David Sterba --- fs/btrfs/compression.c | 4 ++-- fs/btrfs/ctree.h | 4 +--- fs/btrfs/file-item.c | 35 +++++++++++++++++------------------ fs/btrfs/inode.c | 6 +++--- 4 files changed, 23 insertions(+), 26 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 43e1660f450f..b6373de176f8 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -763,7 +763,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { ret = btrfs_lookup_bio_sums(inode, comp_bio, - sums); + false, 0, sums); BUG_ON(ret); /* -ENOMEM */ } @@ -791,7 +791,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, BUG_ON(ret); /* -ENOMEM */ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { - ret = btrfs_lookup_bio_sums(inode, comp_bio, sums); + ret = btrfs_lookup_bio_sums(inode, comp_bio, false, 0, sums); BUG_ON(ret); /* -ENOMEM */ } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ea49e4b52cd2..b1c971cea33a 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2789,9 +2789,7 @@ struct btrfs_dio_private; int btrfs_del_csums(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 len); blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, - u8 *dst); -blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, - u64 logical_offset); + bool at_offset, u64 offset, u8 *dst); int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index b1bfdc5c1387..b7f5394c37a1 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -148,8 +148,21 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, return ret; } -static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, - u64 logical_offset, u8 *dst, int dio) +/** + * btrfs_lookup_bio_sums - Look up checksums for a bio. + * @inode: inode that the bio is for. + * @bio: bio embedded in btrfs_io_bio. + * @at_offset: If true, look up checksums for the extent at @offset. + * If false, use the page offsets from the bio. + * @offset: If @at_offset is true, offset in file to look up checksums for. + * Ignored otherwise. + * @dst: Buffer of size btrfs_super_csum_size() used to return checksum. If + * NULL, the checksum is returned in btrfs_io_bio(bio)->csum instead. + * + * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise. 
+ */ +blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, + bool at_offset, u64 offset, u8 *dst) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct bio_vec bvec; @@ -159,7 +172,6 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_path *path; u8 *csum; - u64 offset = 0; u64 item_start_offset = 0; u64 item_last_offset = 0; u64 disk_bytenr; @@ -205,15 +217,13 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio } disk_bytenr = (u64)bio->bi_iter.bi_sector << 9; - if (dio) - offset = logical_offset; bio_for_each_segment(bvec, bio, iter) { page_bytes_left = bvec.bv_len; if (count) goto next; - if (!dio) + if (!at_offset) offset = page_offset(bvec.bv_page) + bvec.bv_offset; count = btrfs_find_ordered_sum(inode, offset, disk_bytenr, csum, nblocks); @@ -285,18 +295,7 @@ next: WARN_ON_ONCE(count); btrfs_free_path(path); - return 0; -} - -blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, - u8 *dst) -{ - return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0); -} - -blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset) -{ - return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1); + return BLK_STS_OK; } int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4b2d151b6d7b..43858122b36e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2127,7 +2127,7 @@ static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, bio_flags); goto out; } else if (!skip_sum) { - ret = btrfs_lookup_bio_sums(inode, bio, NULL); + ret = btrfs_lookup_bio_sums(inode, bio, false, 0, NULL); if (ret) goto out; } @@ -8387,8 +8387,8 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode, * contention. */ if (dip->logical_offset == file_offset) { - ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio, - file_offset); + ret = btrfs_lookup_bio_sums(inode, dip->orig_bio, true, + file_offset, NULL); if (ret) return ret; } -- cgit v1.2.3-59-g8ed1b From db72e47f79c5dbd95611edd453328d46c1eae93a Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Tue, 10 Dec 2019 10:37:35 -0800 Subject: btrfs: get rid of at_offset parameter to btrfs_lookup_bio_sums() We can encode this in the offset parameter: -1 means use the page offsets, anything else is a valid offset. 
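As a quick illustration of the resulting calling convention (the wrapper below is hypothetical and only spells out the sentinel; the two btrfs_lookup_bio_sums() calls mirror the ones changed in this patch):

  /* Hypothetical wrapper, not part of the patch: shows the offset sentinel. */
  static blk_status_t lookup_sums_example(struct inode *inode, struct bio *bio,
  					bool is_dio, u64 file_offset)
  {
  	if (is_dio)
  		/* direct I/O knows the file offset up front */
  		return btrfs_lookup_bio_sums(inode, bio, file_offset, NULL);

  	/* buffered/compressed reads take offsets from the bio's pages */
  	return btrfs_lookup_bio_sums(inode, bio, (u64)-1, NULL);
  }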
Signed-off-by: Omar Sandoval Signed-off-by: David Sterba --- fs/btrfs/compression.c | 4 ++-- fs/btrfs/ctree.h | 2 +- fs/btrfs/file-item.c | 11 +++++------ fs/btrfs/inode.c | 6 +++--- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index b6373de176f8..de95ad27722f 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -763,7 +763,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { ret = btrfs_lookup_bio_sums(inode, comp_bio, - false, 0, sums); + (u64)-1, sums); BUG_ON(ret); /* -ENOMEM */ } @@ -791,7 +791,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, BUG_ON(ret); /* -ENOMEM */ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { - ret = btrfs_lookup_bio_sums(inode, comp_bio, false, 0, sums); + ret = btrfs_lookup_bio_sums(inode, comp_bio, (u64)-1, sums); BUG_ON(ret); /* -ENOMEM */ } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b1c971cea33a..f895fb490b75 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2789,7 +2789,7 @@ struct btrfs_dio_private; int btrfs_del_csums(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 len); blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, - bool at_offset, u64 offset, u8 *dst); + u64 offset, u8 *dst); int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index b7f5394c37a1..b670014bfc1c 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -152,17 +152,15 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, * btrfs_lookup_bio_sums - Look up checksums for a bio. * @inode: inode that the bio is for. * @bio: bio embedded in btrfs_io_bio. - * @at_offset: If true, look up checksums for the extent at @offset. - * If false, use the page offsets from the bio. - * @offset: If @at_offset is true, offset in file to look up checksums for. - * Ignored otherwise. + * @offset: Unless (u64)-1, look up checksums for this offset in the file. + * If (u64)-1, use the page offsets from the bio instead. * @dst: Buffer of size btrfs_super_csum_size() used to return checksum. If * NULL, the checksum is returned in btrfs_io_bio(bio)->csum instead. * * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise. 
*/ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, - bool at_offset, u64 offset, u8 *dst) + u64 offset, u8 *dst) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct bio_vec bvec; @@ -171,6 +169,7 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, struct btrfs_csum_item *item = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_path *path; + const bool page_offsets = (offset == (u64)-1); u8 *csum; u64 item_start_offset = 0; u64 item_last_offset = 0; @@ -223,7 +222,7 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, if (count) goto next; - if (!at_offset) + if (page_offsets) offset = page_offset(bvec.bv_page) + bvec.bv_offset; count = btrfs_find_ordered_sum(inode, offset, disk_bytenr, csum, nblocks); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 43858122b36e..4d1b3e8449d1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2127,7 +2127,7 @@ static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, bio_flags); goto out; } else if (!skip_sum) { - ret = btrfs_lookup_bio_sums(inode, bio, false, 0, NULL); + ret = btrfs_lookup_bio_sums(inode, bio, (u64)-1, NULL); if (ret) goto out; } @@ -8387,8 +8387,8 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode, * contention. */ if (dip->logical_offset == file_offset) { - ret = btrfs_lookup_bio_sums(inode, dip->orig_bio, true, - file_offset, NULL); + ret = btrfs_lookup_bio_sums(inode, dip->orig_bio, file_offset, + NULL); if (ret) return ret; } -- cgit v1.2.3-59-g8ed1b From 313facc5bd83c8090f28ab1ee71ff01b01d20524 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 2 Dec 2019 17:34:18 -0800 Subject: btrfs: remove dead snapshot-aware defrag code Snapshot-aware defrag has been disabled since commit 8101c8dbf624 ("Btrfs: disable snapshot aware defrag for now") almost 6 years ago. Let's remove the dead code. If someone is up to the task of bringing it back, they can dig it up from git. This is logically a revert of commit 38c227d87c49 ("Btrfs: snapshot-aware defrag") except that now we have to clear the EXTENT_DEFRAG bit to avoid need_force_cow() returning true forever. The reasons to disable were caused by runtime problems (like long stalls or memory consumption) on heavily referenced extents (eg. thousands of snapshots). There were attempts to fix that but never finished. Current defrag breaks the extent references and some users prefer that behaviour over the one implemented by snapshot aware (ie. keeping links for defragmentation). To enable both usecases we'd need to extend defrag ioctl but let's do that properly from scratch. 
Reviewed-by: Nikolay Borisov Reviewed-by: Johannes Thumshirn Signed-off-by: Omar Sandoval Reviewed-by: David Sterba [ enhance ] Signed-off-by: David Sterba --- fs/btrfs/inode.c | 695 +------------------------------------------------------ 1 file changed, 11 insertions(+), 684 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4d1b3e8449d1..86bde32bbcea 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -44,7 +44,6 @@ #include "locking.h" #include "free-space-cache.h" #include "inode-map.h" -#include "backref.h" #include "props.h" #include "qgroup.h" #include "delalloc-space.h" @@ -2393,649 +2392,6 @@ out: return ret; } -/* snapshot-aware defrag */ -struct sa_defrag_extent_backref { - struct rb_node node; - struct old_sa_defrag_extent *old; - u64 root_id; - u64 inum; - u64 file_pos; - u64 extent_offset; - u64 num_bytes; - u64 generation; -}; - -struct old_sa_defrag_extent { - struct list_head list; - struct new_sa_defrag_extent *new; - - u64 extent_offset; - u64 bytenr; - u64 offset; - u64 len; - int count; -}; - -struct new_sa_defrag_extent { - struct rb_root root; - struct list_head head; - struct btrfs_path *path; - struct inode *inode; - u64 file_pos; - u64 len; - u64 bytenr; - u64 disk_len; - u8 compress_type; -}; - -static int backref_comp(struct sa_defrag_extent_backref *b1, - struct sa_defrag_extent_backref *b2) -{ - if (b1->root_id < b2->root_id) - return -1; - else if (b1->root_id > b2->root_id) - return 1; - - if (b1->inum < b2->inum) - return -1; - else if (b1->inum > b2->inum) - return 1; - - if (b1->file_pos < b2->file_pos) - return -1; - else if (b1->file_pos > b2->file_pos) - return 1; - - /* - * [------------------------------] ===> (a range of space) - * |<--->| |<---->| =============> (fs/file tree A) - * |<---------------------------->| ===> (fs/file tree B) - * - * A range of space can refer to two file extents in one tree while - * refer to only one file extent in another tree. - * - * So we may process a disk offset more than one time(two extents in A) - * and locate at the same extent(one extent in B), then insert two same - * backrefs(both refer to the extent in B). - */ - return 0; -} - -static void backref_insert(struct rb_root *root, - struct sa_defrag_extent_backref *backref) -{ - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - struct sa_defrag_extent_backref *entry; - int ret; - - while (*p) { - parent = *p; - entry = rb_entry(parent, struct sa_defrag_extent_backref, node); - - ret = backref_comp(backref, entry); - if (ret < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&backref->node, parent, p); - rb_insert_color(&backref->node, root); -} - -/* - * Note the backref might has changed, and in this case we just return 0. 
- */ -static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, - void *ctx) -{ - struct btrfs_file_extent_item *extent; - struct old_sa_defrag_extent *old = ctx; - struct new_sa_defrag_extent *new = old->new; - struct btrfs_path *path = new->path; - struct btrfs_key key; - struct btrfs_root *root; - struct sa_defrag_extent_backref *backref; - struct extent_buffer *leaf; - struct inode *inode = new->inode; - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - int slot; - int ret; - u64 extent_offset; - u64 num_bytes; - - if (BTRFS_I(inode)->root->root_key.objectid == root_id && - inum == btrfs_ino(BTRFS_I(inode))) - return 0; - - key.objectid = root_id; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - - root = btrfs_read_fs_root_no_name(fs_info, &key); - if (IS_ERR(root)) { - if (PTR_ERR(root) == -ENOENT) - return 0; - WARN_ON(1); - btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu", - inum, offset, root_id); - return PTR_ERR(root); - } - - key.objectid = inum; - key.type = BTRFS_EXTENT_DATA_KEY; - if (offset > (u64)-1 << 32) - key.offset = 0; - else - key.offset = offset; - - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (WARN_ON(ret < 0)) - return ret; - ret = 0; - - while (1) { - cond_resched(); - - leaf = path->nodes[0]; - slot = path->slots[0]; - - if (slot >= btrfs_header_nritems(leaf)) { - ret = btrfs_next_leaf(root, path); - if (ret < 0) { - goto out; - } else if (ret > 0) { - ret = 0; - goto out; - } - continue; - } - - path->slots[0]++; - - btrfs_item_key_to_cpu(leaf, &key, slot); - - if (key.objectid > inum) - goto out; - - if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY) - continue; - - extent = btrfs_item_ptr(leaf, slot, - struct btrfs_file_extent_item); - - if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) - continue; - - /* - * 'offset' refers to the exact key.offset, - * NOT the 'offset' field in btrfs_extent_data_ref, ie. - * (key.offset - extent_offset). 
- */ - if (key.offset != offset) - continue; - - extent_offset = btrfs_file_extent_offset(leaf, extent); - num_bytes = btrfs_file_extent_num_bytes(leaf, extent); - - if (extent_offset >= old->extent_offset + old->offset + - old->len || extent_offset + num_bytes <= - old->extent_offset + old->offset) - continue; - break; - } - - backref = kmalloc(sizeof(*backref), GFP_NOFS); - if (!backref) { - ret = -ENOENT; - goto out; - } - - backref->root_id = root_id; - backref->inum = inum; - backref->file_pos = offset; - backref->num_bytes = num_bytes; - backref->extent_offset = extent_offset; - backref->generation = btrfs_file_extent_generation(leaf, extent); - backref->old = old; - backref_insert(&new->root, backref); - old->count++; -out: - btrfs_release_path(path); - WARN_ON(ret); - return ret; -} - -static noinline bool record_extent_backrefs(struct btrfs_path *path, - struct new_sa_defrag_extent *new) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); - struct old_sa_defrag_extent *old, *tmp; - int ret; - - new->path = path; - - list_for_each_entry_safe(old, tmp, &new->head, list) { - ret = iterate_inodes_from_logical(old->bytenr + - old->extent_offset, fs_info, - path, record_one_backref, - old, false); - if (ret < 0 && ret != -ENOENT) - return false; - - /* no backref to be processed for this extent */ - if (!old->count) { - list_del(&old->list); - kfree(old); - } - } - - if (list_empty(&new->head)) - return false; - - return true; -} - -static int relink_is_mergable(struct extent_buffer *leaf, - struct btrfs_file_extent_item *fi, - struct new_sa_defrag_extent *new) -{ - if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr) - return 0; - - if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) - return 0; - - if (btrfs_file_extent_compression(leaf, fi) != new->compress_type) - return 0; - - if (btrfs_file_extent_encryption(leaf, fi) || - btrfs_file_extent_other_encoding(leaf, fi)) - return 0; - - return 1; -} - -/* - * Note the backref might has changed, and in this case we just return 0. 
- */ -static noinline int relink_extent_backref(struct btrfs_path *path, - struct sa_defrag_extent_backref *prev, - struct sa_defrag_extent_backref *backref) -{ - struct btrfs_file_extent_item *extent; - struct btrfs_file_extent_item *item; - struct btrfs_ordered_extent *ordered; - struct btrfs_trans_handle *trans; - struct btrfs_ref ref = { 0 }; - struct btrfs_root *root; - struct btrfs_key key; - struct extent_buffer *leaf; - struct old_sa_defrag_extent *old = backref->old; - struct new_sa_defrag_extent *new = old->new; - struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); - struct inode *inode; - struct extent_state *cached = NULL; - int ret = 0; - u64 start; - u64 len; - u64 lock_start; - u64 lock_end; - bool merge = false; - int index; - - if (prev && prev->root_id == backref->root_id && - prev->inum == backref->inum && - prev->file_pos + prev->num_bytes == backref->file_pos) - merge = true; - - /* step 1: get root */ - key.objectid = backref->root_id; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - - index = srcu_read_lock(&fs_info->subvol_srcu); - - root = btrfs_read_fs_root_no_name(fs_info, &key); - if (IS_ERR(root)) { - srcu_read_unlock(&fs_info->subvol_srcu, index); - if (PTR_ERR(root) == -ENOENT) - return 0; - return PTR_ERR(root); - } - - if (btrfs_root_readonly(root)) { - srcu_read_unlock(&fs_info->subvol_srcu, index); - return 0; - } - - /* step 2: get inode */ - key.objectid = backref->inum; - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - - inode = btrfs_iget(fs_info->sb, &key, root); - if (IS_ERR(inode)) { - srcu_read_unlock(&fs_info->subvol_srcu, index); - return 0; - } - - srcu_read_unlock(&fs_info->subvol_srcu, index); - - /* step 3: relink backref */ - lock_start = backref->file_pos; - lock_end = backref->file_pos + backref->num_bytes - 1; - lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end, - &cached); - - ordered = btrfs_lookup_first_ordered_extent(inode, lock_end); - if (ordered) { - btrfs_put_ordered_extent(ordered); - goto out_unlock; - } - - trans = btrfs_join_transaction(root); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out_unlock; - } - - key.objectid = backref->inum; - key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = backref->file_pos; - - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) { - goto out_free_path; - } else if (ret > 0) { - ret = 0; - goto out_free_path; - } - - extent = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_file_extent_item); - - if (btrfs_file_extent_generation(path->nodes[0], extent) != - backref->generation) - goto out_free_path; - - btrfs_release_path(path); - - start = backref->file_pos; - if (backref->extent_offset < old->extent_offset + old->offset) - start += old->extent_offset + old->offset - - backref->extent_offset; - - len = min(backref->extent_offset + backref->num_bytes, - old->extent_offset + old->offset + old->len); - len -= max(backref->extent_offset, old->extent_offset + old->offset); - - ret = btrfs_drop_extents(trans, root, inode, start, - start + len, 1); - if (ret) - goto out_free_path; -again: - key.objectid = btrfs_ino(BTRFS_I(inode)); - key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = start; - - path->leave_spinning = 1; - if (merge) { - struct btrfs_file_extent_item *fi; - u64 extent_len; - struct btrfs_key found_key; - - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); - if (ret < 0) - goto out_free_path; - - path->slots[0]--; - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - - fi = 
btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - extent_len = btrfs_file_extent_num_bytes(leaf, fi); - - if (extent_len + found_key.offset == start && - relink_is_mergable(leaf, fi, new)) { - btrfs_set_file_extent_num_bytes(leaf, fi, - extent_len + len); - btrfs_mark_buffer_dirty(leaf); - inode_add_bytes(inode, len); - - ret = 1; - goto out_free_path; - } else { - merge = false; - btrfs_release_path(path); - goto again; - } - } - - ret = btrfs_insert_empty_item(trans, root, path, &key, - sizeof(*extent)); - if (ret) { - btrfs_abort_transaction(trans, ret); - goto out_free_path; - } - - leaf = path->nodes[0]; - item = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr); - btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len); - btrfs_set_file_extent_offset(leaf, item, start - new->file_pos); - btrfs_set_file_extent_num_bytes(leaf, item, len); - btrfs_set_file_extent_ram_bytes(leaf, item, new->len); - btrfs_set_file_extent_generation(leaf, item, trans->transid); - btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); - btrfs_set_file_extent_compression(leaf, item, new->compress_type); - btrfs_set_file_extent_encryption(leaf, item, 0); - btrfs_set_file_extent_other_encoding(leaf, item, 0); - - btrfs_mark_buffer_dirty(leaf); - inode_add_bytes(inode, len); - btrfs_release_path(path); - - btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new->bytenr, - new->disk_len, 0); - btrfs_init_data_ref(&ref, backref->root_id, backref->inum, - new->file_pos); /* start - extent_offset */ - ret = btrfs_inc_extent_ref(trans, &ref); - if (ret) { - btrfs_abort_transaction(trans, ret); - goto out_free_path; - } - - ret = 1; -out_free_path: - btrfs_release_path(path); - path->leave_spinning = 0; - btrfs_end_transaction(trans); -out_unlock: - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end, - &cached); - iput(inode); - return ret; -} - -static void free_sa_defrag_extent(struct new_sa_defrag_extent *new) -{ - struct old_sa_defrag_extent *old, *tmp; - - if (!new) - return; - - list_for_each_entry_safe(old, tmp, &new->head, list) { - kfree(old); - } - kfree(new); -} - -static void relink_file_extents(struct new_sa_defrag_extent *new) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); - struct btrfs_path *path; - struct sa_defrag_extent_backref *backref; - struct sa_defrag_extent_backref *prev = NULL; - struct rb_node *node; - int ret; - - path = btrfs_alloc_path(); - if (!path) - return; - - if (!record_extent_backrefs(path, new)) { - btrfs_free_path(path); - goto out; - } - btrfs_release_path(path); - - while (1) { - node = rb_first(&new->root); - if (!node) - break; - rb_erase(node, &new->root); - - backref = rb_entry(node, struct sa_defrag_extent_backref, node); - - ret = relink_extent_backref(path, prev, backref); - WARN_ON(ret < 0); - - kfree(prev); - - if (ret == 1) - prev = backref; - else - prev = NULL; - cond_resched(); - } - kfree(prev); - - btrfs_free_path(path); -out: - free_sa_defrag_extent(new); - - atomic_dec(&fs_info->defrag_running); - wake_up(&fs_info->transaction_wait); -} - -static struct new_sa_defrag_extent * -record_old_file_extents(struct inode *inode, - struct btrfs_ordered_extent *ordered) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_path *path; - struct btrfs_key key; - struct old_sa_defrag_extent *old; - struct new_sa_defrag_extent *new; - int ret; - - 
new = kmalloc(sizeof(*new), GFP_NOFS); - if (!new) - return NULL; - - new->inode = inode; - new->file_pos = ordered->file_offset; - new->len = ordered->len; - new->bytenr = ordered->start; - new->disk_len = ordered->disk_len; - new->compress_type = ordered->compress_type; - new->root = RB_ROOT; - INIT_LIST_HEAD(&new->head); - - path = btrfs_alloc_path(); - if (!path) - goto out_kfree; - - key.objectid = btrfs_ino(BTRFS_I(inode)); - key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = new->file_pos; - - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) - goto out_free_path; - if (ret > 0 && path->slots[0] > 0) - path->slots[0]--; - - /* find out all the old extents for the file range */ - while (1) { - struct btrfs_file_extent_item *extent; - struct extent_buffer *l; - int slot; - u64 num_bytes; - u64 offset; - u64 end; - u64 disk_bytenr; - u64 extent_offset; - - l = path->nodes[0]; - slot = path->slots[0]; - - if (slot >= btrfs_header_nritems(l)) { - ret = btrfs_next_leaf(root, path); - if (ret < 0) - goto out_free_path; - else if (ret > 0) - break; - continue; - } - - btrfs_item_key_to_cpu(l, &key, slot); - - if (key.objectid != btrfs_ino(BTRFS_I(inode))) - break; - if (key.type != BTRFS_EXTENT_DATA_KEY) - break; - if (key.offset >= new->file_pos + new->len) - break; - - extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item); - - num_bytes = btrfs_file_extent_num_bytes(l, extent); - if (key.offset + num_bytes < new->file_pos) - goto next; - - disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent); - if (!disk_bytenr) - goto next; - - extent_offset = btrfs_file_extent_offset(l, extent); - - old = kmalloc(sizeof(*old), GFP_NOFS); - if (!old) - goto out_free_path; - - offset = max(new->file_pos, key.offset); - end = min(new->file_pos + new->len, key.offset + num_bytes); - - old->bytenr = disk_bytenr; - old->extent_offset = extent_offset; - old->offset = offset - key.offset; - old->len = end - offset; - old->new = new; - old->count = 0; - list_add_tail(&old->list, &new->head); -next: - path->slots[0]++; - cond_resched(); - } - - btrfs_free_path(path); - atomic_inc(&fs_info->defrag_running); - - return new; - -out_free_path: - btrfs_free_path(path); -out_kfree: - free_sa_defrag_extent(new); - return NULL; -} - static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, u64 start, u64 len) { @@ -3063,7 +2419,6 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) struct btrfs_trans_handle *trans = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_state *cached_state = NULL; - struct new_sa_defrag_extent *new = NULL; int compress_type = 0; int ret = 0; u64 logical_len = ordered_extent->len; @@ -3072,6 +2427,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) bool range_locked = false; bool clear_new_delalloc_bytes = false; bool clear_reserved_extent = true; + unsigned int clear_bits; if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && @@ -3130,20 +2486,6 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) ordered_extent->file_offset + ordered_extent->len - 1, &cached_state); - ret = test_range_bit(io_tree, ordered_extent->file_offset, - ordered_extent->file_offset + ordered_extent->len - 1, - EXTENT_DEFRAG, 0, cached_state); - if (ret) { - u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); - if (0 && last_snapshot >= BTRFS_I(inode)->generation) - /* the 
inode is shared */ - new = record_old_file_extents(inode, ordered_extent); - - clear_extent_bit(io_tree, ordered_extent->file_offset, - ordered_extent->file_offset + ordered_extent->len - 1, - EXTENT_DEFRAG, 0, 0, &cached_state); - } - if (freespace_inode) trans = btrfs_join_transaction_spacecache(root); else @@ -3204,21 +2546,16 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) } ret = 0; out: - if (range_locked || clear_new_delalloc_bytes) { - unsigned int clear_bits = 0; - - if (range_locked) - clear_bits |= EXTENT_LOCKED; - if (clear_new_delalloc_bytes) - clear_bits |= EXTENT_DELALLOC_NEW; - clear_extent_bit(&BTRFS_I(inode)->io_tree, - ordered_extent->file_offset, - ordered_extent->file_offset + - ordered_extent->len - 1, - clear_bits, - (clear_bits & EXTENT_LOCKED) ? 1 : 0, - 0, &cached_state); - } + clear_bits = EXTENT_DEFRAG; + if (range_locked) + clear_bits |= EXTENT_LOCKED; + if (clear_new_delalloc_bytes) + clear_bits |= EXTENT_DELALLOC_NEW; + clear_extent_bit(&BTRFS_I(inode)->io_tree, + ordered_extent->file_offset, + ordered_extent->file_offset + ordered_extent->len - 1, + clear_bits, (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0, + &cached_state); if (trans) btrfs_end_transaction(trans); @@ -3271,16 +2608,6 @@ out: */ btrfs_remove_ordered_extent(inode, ordered_extent); - /* for snapshot-aware defrag */ - if (new) { - if (ret) { - free_sa_defrag_extent(new); - atomic_dec(&fs_info->defrag_running); - } else { - relink_file_extents(new); - } - } - /* once for us */ btrfs_put_ordered_extent(ordered_extent); /* once for the tree */ -- cgit v1.2.3-59-g8ed1b From bffe633e00fb6b904817137fc17a44b42efcd985 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 2 Dec 2019 17:34:19 -0800 Subject: btrfs: make btrfs_ordered_extent naming consistent with btrfs_file_extent_item ordered->start, ordered->len, and ordered->disk_len correspond to fi->disk_bytenr, fi->num_bytes, and fi->disk_num_bytes, respectively. It's confusing to translate between the two naming schemes. Since a btrfs_ordered_extent is basically a pending btrfs_file_extent_item, let's make the former use the naming from the latter. Note that I didn't touch the names in tracepoints just in case there are scripts depending on the current naming. 
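For reference, a small sketch of how the new names read at a call site; the helper below is illustrative only (it just restates entry_end() from the ordered-data.c hunk) and is not something this patch adds:

  /* Illustrative only: end of the file range covered by an ordered extent,
   * written with the new field names (previously start/len/disk_len). */
  static inline u64 ordered_extent_file_end(const struct btrfs_ordered_extent *ordered)
  {
  	/* disk_bytenr/disk_num_bytes describe the extent on disk;
  	 * file_offset/num_bytes describe the file range it covers. */
  	return ordered->file_offset + ordered->num_bytes;
  }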
Reviewed-by: Johannes Thumshirn Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/file-item.c | 4 +-- fs/btrfs/file.c | 6 ++-- fs/btrfs/inode.c | 70 ++++++++++++++++++++------------------------ fs/btrfs/ordered-data.c | 69 +++++++++++++++++++++---------------------- fs/btrfs/ordered-data.h | 26 ++++++++-------- fs/btrfs/relocation.c | 4 +-- include/trace/events/btrfs.h | 6 ++-- 7 files changed, 90 insertions(+), 95 deletions(-) diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index b670014bfc1c..bb374042d297 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -481,8 +481,8 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, - 1); for (i = 0; i < nr_sectors; i++) { - if (offset >= ordered->file_offset + ordered->len || - offset < ordered->file_offset) { + if (offset >= ordered->file_offset + ordered->num_bytes || + offset < ordered->file_offset) { unsigned long bytes_left; sums->len = this_sum_bytes; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 8d47c76b7bd1..76c68c70d3e2 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1501,7 +1501,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages, ordered = btrfs_lookup_ordered_range(inode, start_pos, last_pos - start_pos + 1); if (ordered && - ordered->file_offset + ordered->len > start_pos && + ordered->file_offset + ordered->num_bytes > start_pos && ordered->file_offset <= last_pos) { unlock_extent_cached(&inode->io_tree, start_pos, last_pos, cached_state); @@ -2426,7 +2426,7 @@ static int btrfs_punch_hole_lock_range(struct inode *inode, * we need to try again. */ if ((!ordered || - (ordered->file_offset + ordered->len <= lockstart || + (ordered->file_offset + ordered->num_bytes <= lockstart || ordered->file_offset > lockend)) && !filemap_range_has_page(inode->i_mapping, lockstart, lockend)) { @@ -3248,7 +3248,7 @@ static long btrfs_fallocate(struct file *file, int mode, ordered = btrfs_lookup_first_ordered_extent(inode, locked_end); if (ordered && - ordered->file_offset + ordered->len > alloc_start && + ordered->file_offset + ordered->num_bytes > alloc_start && ordered->file_offset < alloc_end) { btrfs_put_ordered_extent(ordered); unlock_extent_cached(&BTRFS_I(inode)->io_tree, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 86bde32bbcea..0695d64df05b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2419,9 +2419,10 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) struct btrfs_trans_handle *trans = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_state *cached_state = NULL; + u64 start, end; int compress_type = 0; int ret = 0; - u64 logical_len = ordered_extent->len; + u64 logical_len = ordered_extent->num_bytes; bool freespace_inode; bool truncated = false; bool range_locked = false; @@ -2429,6 +2430,9 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) bool clear_reserved_extent = true; unsigned int clear_bits; + start = ordered_extent->file_offset; + end = start + ordered_extent->num_bytes - 1; + if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags)) @@ -2441,10 +2445,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) goto out; } - btrfs_free_io_failure_record(BTRFS_I(inode), - ordered_extent->file_offset, - ordered_extent->file_offset + - 
ordered_extent->len - 1); + btrfs_free_io_failure_record(BTRFS_I(inode), start, end); if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { truncated = true; @@ -2462,8 +2463,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) * space for NOCOW range. * As NOCOW won't cause a new delayed ref, just free the space */ - btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset, - ordered_extent->len); + btrfs_qgroup_free_data(inode, NULL, start, + ordered_extent->num_bytes); btrfs_ordered_update_i_size(inode, 0, ordered_extent); if (freespace_inode) trans = btrfs_join_transaction_spacecache(root); @@ -2482,9 +2483,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) } range_locked = true; - lock_extent_bits(io_tree, ordered_extent->file_offset, - ordered_extent->file_offset + ordered_extent->len - 1, - &cached_state); + lock_extent_bits(io_tree, start, end, &cached_state); if (freespace_inode) trans = btrfs_join_transaction_spacecache(root); @@ -2502,31 +2501,30 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) compress_type = ordered_extent->compress_type; if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { BUG_ON(compress_type); - btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset, - ordered_extent->len); + btrfs_qgroup_free_data(inode, NULL, start, + ordered_extent->num_bytes); ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), ordered_extent->file_offset, ordered_extent->file_offset + logical_len); } else { BUG_ON(root == fs_info->tree_root); - ret = insert_reserved_file_extent(trans, inode, - ordered_extent->file_offset, - ordered_extent->start, - ordered_extent->disk_len, + ret = insert_reserved_file_extent(trans, inode, start, + ordered_extent->disk_bytenr, + ordered_extent->disk_num_bytes, logical_len, logical_len, compress_type, 0, 0, BTRFS_FILE_EXTENT_REG); if (!ret) { clear_reserved_extent = false; btrfs_release_delalloc_bytes(fs_info, - ordered_extent->start, - ordered_extent->disk_len); + ordered_extent->disk_bytenr, + ordered_extent->disk_num_bytes); } } unpin_extent_cache(&BTRFS_I(inode)->extent_tree, - ordered_extent->file_offset, ordered_extent->len, - trans->transid); + ordered_extent->file_offset, + ordered_extent->num_bytes, trans->transid); if (ret < 0) { btrfs_abort_transaction(trans, ret); goto out; @@ -2551,27 +2549,22 @@ out: clear_bits |= EXTENT_LOCKED; if (clear_new_delalloc_bytes) clear_bits |= EXTENT_DELALLOC_NEW; - clear_extent_bit(&BTRFS_I(inode)->io_tree, - ordered_extent->file_offset, - ordered_extent->file_offset + ordered_extent->len - 1, - clear_bits, (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0, + clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, + (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0, &cached_state); if (trans) btrfs_end_transaction(trans); if (ret || truncated) { - u64 start, end; + u64 unwritten_start = start; if (truncated) - start = ordered_extent->file_offset + logical_len; - else - start = ordered_extent->file_offset; - end = ordered_extent->file_offset + ordered_extent->len - 1; - clear_extent_uptodate(io_tree, start, end, NULL); + unwritten_start += logical_len; + clear_extent_uptodate(io_tree, unwritten_start, end, NULL); /* Drop the cache for the part of the extent we didn't write. 
*/ - btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), unwritten_start, end, 0); /* * If the ordered extent had an IOERR or something else went @@ -2593,15 +2586,15 @@ out: */ if (ret && btrfs_test_opt(fs_info, DISCARD)) btrfs_discard_extent(fs_info, - ordered_extent->start, - ordered_extent->disk_len, NULL); + ordered_extent->disk_bytenr, + ordered_extent->disk_num_bytes, + NULL); btrfs_free_reserved_extent(fs_info, - ordered_extent->start, - ordered_extent->disk_len, 1); + ordered_extent->disk_bytenr, + ordered_extent->disk_num_bytes, 1); } } - /* * This needs to be done to make sure anybody waiting knows we are done * updating everything for this ordered extent. @@ -8228,7 +8221,8 @@ again: ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, page_end - start + 1); if (ordered) { - end = min(page_end, ordered->file_offset + ordered->len - 1); + end = min(page_end, + ordered->file_offset + ordered->num_bytes - 1); /* * IO on this page will never be started, so we need * to account for any ordered extents now @@ -8753,7 +8747,7 @@ void btrfs_destroy_inode(struct inode *inode) else { btrfs_err(fs_info, "found ordered extent %llu %llu on inode cleanup", - ordered->file_offset, ordered->len); + ordered->file_offset, ordered->num_bytes); btrfs_remove_ordered_extent(inode, ordered); btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index ed29c30802ab..ecb9fb6a6fe0 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -20,9 +20,9 @@ static struct kmem_cache *btrfs_ordered_extent_cache; static u64 entry_end(struct btrfs_ordered_extent *entry) { - if (entry->file_offset + entry->len < entry->file_offset) + if (entry->file_offset + entry->num_bytes < entry->file_offset) return (u64)-1; - return entry->file_offset + entry->len; + return entry->file_offset + entry->num_bytes; } /* returns NULL if the insertion worked, or it returns the node it did find @@ -112,7 +112,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) { if (file_offset < entry->file_offset || - entry->file_offset + entry->len <= file_offset) + entry->file_offset + entry->num_bytes <= file_offset) return 0; return 1; } @@ -121,7 +121,7 @@ static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, u64 len) { if (file_offset + len <= entry->file_offset || - entry->file_offset + entry->len <= file_offset) + entry->file_offset + entry->num_bytes <= file_offset) return 0; return 1; } @@ -153,19 +153,14 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, } /* allocate and add a new ordered_extent into the per-inode tree. - * file_offset is the logical offset in the file - * - * start is the disk block number of an extent already reserved in the - * extent allocation tree - * - * len is the length of the extent * * The tree is given a single reference on the ordered extent that was * inserted. 
*/ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, - int type, int dio, int compress_type) + u64 disk_bytenr, u64 num_bytes, + u64 disk_num_bytes, int type, int dio, + int compress_type) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; @@ -179,10 +174,10 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, return -ENOMEM; entry->file_offset = file_offset; - entry->start = start; - entry->len = len; - entry->disk_len = disk_len; - entry->bytes_left = len; + entry->disk_bytenr = disk_bytenr; + entry->num_bytes = num_bytes; + entry->disk_num_bytes = disk_num_bytes; + entry->bytes_left = num_bytes; entry->inode = igrab(inode); entry->compress_type = compress_type; entry->truncated_len = (u64)-1; @@ -190,7 +185,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, set_bit(type, &entry->flags); if (dio) { - percpu_counter_add_batch(&fs_info->dio_bytes, len, + percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes, fs_info->delalloc_batch); set_bit(BTRFS_ORDERED_DIRECT, &entry->flags); } @@ -241,27 +236,30 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, } int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, int type) + u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes, + int type) { - return __btrfs_add_ordered_extent(inode, file_offset, start, len, - disk_len, type, 0, + return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr, + num_bytes, disk_num_bytes, type, 0, BTRFS_COMPRESS_NONE); } int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, int type) + u64 disk_bytenr, u64 num_bytes, + u64 disk_num_bytes, int type) { - return __btrfs_add_ordered_extent(inode, file_offset, start, len, - disk_len, type, 1, + return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr, + num_bytes, disk_num_bytes, type, 1, BTRFS_COMPRESS_NONE); } int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, - int type, int compress_type) + u64 disk_bytenr, u64 num_bytes, + u64 disk_num_bytes, int type, + int compress_type) { - return __btrfs_add_ordered_extent(inode, file_offset, start, len, - disk_len, type, 0, + return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr, + num_bytes, disk_num_bytes, type, 0, compress_type); } @@ -322,8 +320,8 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode, } dec_start = max(*file_offset, entry->file_offset); - dec_end = min(*file_offset + io_size, entry->file_offset + - entry->len); + dec_end = min(*file_offset + io_size, + entry->file_offset + entry->num_bytes); *file_offset = dec_end; if (dec_start > dec_end) { btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu", @@ -465,10 +463,11 @@ void btrfs_remove_ordered_extent(struct inode *inode, btrfs_mod_outstanding_extents(btrfs_inode, -1); spin_unlock(&btrfs_inode->lock); if (root != fs_info->tree_root) - btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false); + btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes, + false); if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) - percpu_counter_add_batch(&fs_info->dio_bytes, -entry->len, + percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes, fs_info->delalloc_batch); tree = &btrfs_inode->ordered_tree; @@ -528,8 +527,8 @@ u64 btrfs_wait_ordered_extents(struct 
btrfs_root *root, u64 nr, ordered = list_first_entry(&splice, struct btrfs_ordered_extent, root_extent_list); - if (range_end <= ordered->start || - ordered->start + ordered->disk_len <= range_start) { + if (range_end <= ordered->disk_bytenr || + ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) { list_move_tail(&ordered->root_extent_list, &skipped); cond_resched_lock(&root->ordered_extent_lock); continue; @@ -613,7 +612,7 @@ void btrfs_start_ordered_extent(struct inode *inode, int wait) { u64 start = entry->file_offset; - u64 end = start + entry->len - 1; + u64 end = start + entry->num_bytes - 1; trace_btrfs_ordered_extent_start(inode, entry); @@ -674,7 +673,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) btrfs_put_ordered_extent(ordered); break; } - if (ordered->file_offset + ordered->len <= start) { + if (ordered->file_offset + ordered->num_bytes <= start) { btrfs_put_ordered_extent(ordered); break; } diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 4eb0319a86d7..3beb4da4ab41 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -67,14 +67,13 @@ struct btrfs_ordered_extent { /* logical offset in the file */ u64 file_offset; - /* disk byte number */ - u64 start; - - /* ram length of the extent in bytes */ - u64 len; - - /* extent length on disk */ - u64 disk_len; + /* + * These fields directly correspond to the same fields in + * btrfs_file_extent_item. + */ + u64 disk_bytenr; + u64 num_bytes; + u64 disk_num_bytes; /* number of bytes that still need writing */ u64 bytes_left; @@ -161,12 +160,15 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode, u64 *file_offset, u64 io_size, int uptodate); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, int type); + u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes, + int type); int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, int type); + u64 disk_bytenr, u64 num_bytes, + u64 disk_num_bytes, int type); int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, - int type, int compress_type); + u64 disk_bytenr, u64 num_bytes, + u64 disk_num_bytes, int type, + int compress_type); void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 1875e84b9bf7..995d4b8b1cfd 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4668,7 +4668,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) LIST_HEAD(list); ordered = btrfs_lookup_ordered_extent(inode, file_pos); - BUG_ON(ordered->file_offset != file_pos || ordered->len != len); + BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len); disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr, @@ -4692,7 +4692,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) * disk_len vs real len like with real inodes since it's all * disk length. 
*/ - new_bytenr = ordered->start + (sums->bytenr - disk_bytenr); + new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr; sums->bytenr = new_bytenr; btrfs_add_ordered_sum(ordered, sums); diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 620bf1b38fba..17088a112ed0 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -496,9 +496,9 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent, TP_fast_assign_btrfs(btrfs_sb(inode->i_sb), __entry->ino = btrfs_ino(BTRFS_I(inode)); __entry->file_offset = ordered->file_offset; - __entry->start = ordered->start; - __entry->len = ordered->len; - __entry->disk_len = ordered->disk_len; + __entry->start = ordered->disk_bytenr; + __entry->len = ordered->num_bytes; + __entry->disk_len = ordered->disk_num_bytes; __entry->bytes_left = ordered->bytes_left; __entry->flags = ordered->flags; __entry->compress_type = ordered->compress_type; -- cgit v1.2.3-59-g8ed1b From eb70d2226347efe3b7de771f8034166c2740418d Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 2 Dec 2019 17:34:20 -0800 Subject: btrfs: remove unnecessary pg_offset assignments in __extent_writepage() We're initializing pg_offset to 0, setting it immediately, then reassigning it to 0 again after. The former became unnecessary in 211c17f51f46 ("Fix corners in writepage and btrfs_truncate_page"). The latter is a leftover that should've been removed in 40f765805f08 ("Btrfs: split up __extent_writepage to lower stack usage"). Remove both. Reviewed-by: Johannes Thumshirn Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2f4802f405a2..b2d0418a4c55 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3562,7 +3562,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, u64 page_end = start + PAGE_SIZE - 1; int ret; int nr = 0; - size_t pg_offset = 0; + size_t pg_offset; loff_t i_size = i_size_read(inode); unsigned long end_index = i_size >> PAGE_SHIFT; unsigned long nr_written = 0; @@ -3591,8 +3591,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, flush_dcache_page(page); } - pg_offset = 0; - set_page_extent_mapped(page); if (!epd->extent_locked) { -- cgit v1.2.3-59-g8ed1b From 169d2c875ec6461f6d18aaddbc1f30ba2b6b5a29 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 2 Dec 2019 17:34:21 -0800 Subject: btrfs: remove trivial goto label in __extent_writepage() Since 40f765805f08 ("Btrfs: split up __extent_writepage to lower stack usage"), done_unlocked is simply a return 0. Get rid of it. Mid-statement block returns don seem to make the code less readable here. 
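To make the cleanup concrete, the pattern being removed is roughly the
following (a simplified sketch of the hunks in the diff below, not new code):

	if (ret == 1)
		goto done_unlocked;
	...
done:
	...
	return ret;

done_unlocked:
	return 0;

After the patch both jump sites simply read

	if (ret == 1)
		return 0;

so the trailing label and its lone "return 0" can be dropped.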
Reviewed-by: Johannes Thumshirn Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index b2d0418a4c55..9475e81dc419 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3596,7 +3596,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, if (!epd->extent_locked) { ret = writepage_delalloc(inode, page, wbc, start, &nr_written); if (ret == 1) - goto done_unlocked; + return 0; if (ret) goto done; } @@ -3604,7 +3604,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, ret = __extent_writepage_io(inode, page, wbc, epd, i_size, nr_written, &nr); if (ret == 1) - goto done_unlocked; + return 0; done: if (nr == 0) { @@ -3619,9 +3619,6 @@ done: unlock_page(page); ASSERT(ret <= 0); return ret; - -done_unlocked: - return 0; } void wait_on_extent_buffer_writeback(struct extent_buffer *eb) -- cgit v1.2.3-59-g8ed1b From f95d713b540487901871f90512f01f2fb3e6dfaa Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 2 Dec 2019 17:34:22 -0800 Subject: btrfs: remove redundant i_size check in __extent_writepage_io() In __extent_writepage_io(), we check whether i_size <= page_offset(page). Note that if i_size < page_offset(page), then i_size >> PAGE_SHIFT < page->index. If i_size == page_offset(page), then i_size >> PAGE_SHIFT == page->index && offset_in_page(i_size) == 0. __extent_writepage() already has a check for these cases that returns without calling __extent_writepage_io(): end_index = i_size >> PAGE_SHIFT pg_offset = offset_in_page(i_size); if (page->index > end_index || (page->index == end_index && !pg_offset)) { page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); return 0; } Get rid of the one in __extent_writepage_io(), which was obsoleted in 211c17f51f46 ("Fix corners in writepage and btrfs_truncate_page"). Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9475e81dc419..00ddefcb54c8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3455,11 +3455,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, update_nr_written(wbc, nr_written + 1); end = page_end; - if (i_size <= start) { - btrfs_writepage_endio_finish_ordered(page, start, page_end, 1); - goto done; - } - blocksize = inode->i_sb->s_blocksize; while (cur <= end) { @@ -3540,7 +3535,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, pg_offset += iosize; nr++; } -done: *nr_ret = nr; return ret; } -- cgit v1.2.3-59-g8ed1b From 39b07b5d7072f8e9fd8cc2f840d3749f86699bbb Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 2 Dec 2019 17:34:23 -0800 Subject: btrfs: drop create parameter to btrfs_get_extent() We only pass this as 1 from __extent_writepage_io(). The parameter basically means "pretend I didn't pass in a page". This is silly since we can simply not pass in the page. Get rid of the parameter from btrfs_get_extent(), and since it's used as a get_extent_t callback, remove it from get_extent_t and btree_get_extent(), neither of which need it. While we're here, let's document btrfs_get_extent(). 
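As a rough before/after sketch (taken from the call-site changes in the diff
below): a lookup that never wants inline data copied changes from

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);

to

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);

and the one writepage caller that used to pass its page together with
create == 1 now passes a NULL page instead, which expresses the same
"don't copy into the page" intent directly.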
Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 +- fs/btrfs/disk-io.c | 4 ++-- fs/btrfs/disk-io.h | 4 ++-- fs/btrfs/extent_io.c | 6 +++--- fs/btrfs/extent_io.h | 6 ++---- fs/btrfs/file.c | 17 ++++++++--------- fs/btrfs/inode.c | 41 ++++++++++++++++++++++++----------------- fs/btrfs/ioctl.c | 2 +- fs/btrfs/tests/inode-tests.c | 44 +++++++++++++++++++++----------------------- 9 files changed, 64 insertions(+), 62 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f895fb490b75..e416ef6c9415 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2875,7 +2875,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, struct btrfs_root *root); struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, struct page *page, size_t pg_offset, - u64 start, u64 end, int create); + u64 start, u64 end); int btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index ab888d89d844..881aba162e4e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -202,8 +202,8 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, * that covers the entire device */ struct extent_map *btree_get_extent(struct btrfs_inode *inode, - struct page *page, size_t pg_offset, u64 start, u64 len, - int create) + struct page *page, size_t pg_offset, + u64 start, u64 len) { struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 76f123ebb292..8c2d6cf1ce59 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -134,8 +134,8 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, int btree_lock_page_hook(struct page *page, void *data, void (*flush_fn)(void *)); struct extent_map *btree_get_extent(struct btrfs_inode *inode, - struct page *page, size_t pg_offset, u64 start, u64 len, - int create); + struct page *page, size_t pg_offset, + u64 start, u64 len); int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags); int __init btrfs_end_io_wq_init(void); void __cold btrfs_end_io_wq_exit(void); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 00ddefcb54c8..bbfb102d65b8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3043,7 +3043,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, *em_cached = NULL; } - em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0); + em = get_extent(BTRFS_I(inode), page, pg_offset, start, len); if (em_cached && !IS_ERR_OR_NULL(em)) { BUG_ON(*em_cached); refcount_inc(&em->refs); @@ -3466,8 +3466,8 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, page_end, 1); break; } - em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur, - end - cur + 1, 1); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur, + end - cur + 1); if (IS_ERR_OR_NULL(em)) { SetPageError(page); ret = PTR_ERR_OR_ZERO(em); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index a8551a1f56e2..5d205bbaafdc 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -183,10 +183,8 @@ static inline int extent_compress_type(unsigned long bio_flags) struct extent_map_tree; typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode, - struct page *page, - size_t pg_offset, - u64 start, u64 len, - int create); + struct page *page, size_t pg_offset, + u64 start, u64 len); int try_release_extent_mapping(struct 
page *page, gfp_t mask); int try_release_extent_buffer(struct page *page); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 76c68c70d3e2..a16da274c9aa 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -477,8 +477,7 @@ static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, u64 em_len; int ret = 0; - em = btrfs_get_extent(inode, NULL, 0, search_start, - search_len, 0); + em = btrfs_get_extent(inode, NULL, 0, search_start, search_len); if (IS_ERR(em)) return PTR_ERR(em); @@ -2390,7 +2389,7 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len) em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, round_down(*start, fs_info->sectorsize), - round_up(*len, fs_info->sectorsize), 0); + round_up(*len, fs_info->sectorsize)); if (IS_ERR(em)) return PTR_ERR(em); @@ -2957,7 +2956,7 @@ static int btrfs_zero_range_check_range_boundary(struct inode *inode, int ret; offset = round_down(offset, sectorsize); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) return PTR_ERR(em); @@ -2990,8 +2989,8 @@ static int btrfs_zero_range(struct inode *inode, inode_dio_wait(inode); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, - alloc_start, alloc_end - alloc_start, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start, + alloc_end - alloc_start); if (IS_ERR(em)) { ret = PTR_ERR(em); goto out; @@ -3034,8 +3033,8 @@ static int btrfs_zero_range(struct inode *inode, if (BTRFS_BYTES_TO_BLKS(fs_info, offset) == BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) { - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, - alloc_start, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start, + sectorsize); if (IS_ERR(em)) { ret = PTR_ERR(em); goto out; @@ -3273,7 +3272,7 @@ static long btrfs_fallocate(struct file *file, int mode, INIT_LIST_HEAD(&reserve_list); while (cur_offset < alloc_end) { em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, - alloc_end - cur_offset, 0); + alloc_end - cur_offset); if (IS_ERR(em)) { ret = PTR_ERR(em); break; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0695d64df05b..bf5d7ce358b5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4504,7 +4504,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) cur_offset = hole_start; while (1) { em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, - block_end - cur_offset, 0); + block_end - cur_offset); if (IS_ERR(em)) { err = PTR_ERR(em); em = NULL; @@ -6283,18 +6283,27 @@ static noinline int uncompress_inline(struct btrfs_path *path, return ret; } -/* - * a bit scary, this does extent mapping from logical file offset to the disk. - * the ugly parts come from merging extents from the disk with the in-ram - * representation. This gets more complex because of the data=ordered code, - * where the in-ram extents might be locked pending data=ordered completion. +/** + * btrfs_get_extent - Lookup the first extent overlapping a range in a file. + * @inode: file to search in + * @page: page to read extent data into if the extent is inline + * @pg_offset: offset into @page to copy to + * @start: file offset + * @len: length of range starting at @start + * + * This returns the first &struct extent_map which overlaps with the given + * range, reading it from the B-tree and caching it if necessary. Note that + * there may be more extents which overlap the given range after the returned + * extent_map. 
* - * This also copies inline extents directly into the page. + * If @page is not NULL and the extent is inline, this also reads the extent + * data directly into the page and marks the extent up to date in the io_tree. + * + * Return: ERR_PTR on error, non-NULL extent_map on success. */ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, - struct page *page, - size_t pg_offset, u64 start, u64 len, - int create) + struct page *page, size_t pg_offset, + u64 start, u64 len) { struct btrfs_fs_info *fs_info = inode->root->fs_info; int ret; @@ -6311,7 +6320,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, struct extent_map *em = NULL; struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_io_tree *io_tree = &inode->io_tree; - const bool new_inline = !page || create; read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); @@ -6434,8 +6442,7 @@ next: goto insert; } - btrfs_extent_item_to_extent_map(inode, path, item, - new_inline, em); + btrfs_extent_item_to_extent_map(inode, path, item, !page, em); if (extent_type == BTRFS_FILE_EXTENT_REG || extent_type == BTRFS_FILE_EXTENT_PREALLOC) { @@ -6447,7 +6454,7 @@ next: size_t extent_offset; size_t copy_size; - if (new_inline) + if (!page) goto out; size = btrfs_file_extent_ram_bytes(leaf, item); @@ -6530,7 +6537,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, u64 delalloc_end; int err = 0; - em = btrfs_get_extent(inode, NULL, 0, start, len, 0); + em = btrfs_get_extent(inode, NULL, 0, start, len); if (IS_ERR(em)) return em; /* @@ -7155,7 +7162,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, goto err; } - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); if (IS_ERR(em)) { ret = PTR_ERR(em); goto unlock_err; @@ -10166,7 +10173,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, struct btrfs_block_group *bg; u64 len = isize - start; - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); if (IS_ERR(em)) { ret = PTR_ERR(em); goto out; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 12ae31e1813e..1b1b6ff855aa 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1128,7 +1128,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) /* get the big lock and read metadata off disk */ lock_extent_bits(io_tree, start, end, &cached); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); unlock_extent_cached(io_tree, start, end, &cached); if (IS_ERR(em)) diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 09ecf7dc7b08..24a8c714f56c 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -263,7 +263,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) /* First with no extents */ BTRFS_I(inode)->root = root; - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, sectorsize); if (IS_ERR(em)) { em = NULL; test_err("got an error when we shouldn't have"); @@ -283,7 +283,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) */ setup_file_extents(root, sectorsize); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, (u64)-1, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, (u64)-1); if (IS_ERR(em)) { 
test_err("got an error when we shouldn't have"); goto out; @@ -305,7 +305,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -333,7 +333,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -356,7 +356,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Regular extent */ - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -384,7 +384,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* The next 3 are split extents */ - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -413,7 +413,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -435,7 +435,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -469,7 +469,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Prealloc extent */ - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -498,7 +498,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* The next 3 are a half written prealloc extent */ - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -528,7 +528,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -561,7 +561,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + 
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -596,7 +596,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Now for the compressed extent */ - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -630,7 +630,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Split compressed extent */ - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -665,7 +665,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -692,7 +692,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -727,8 +727,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* A hole between regular extents but no hole extent */ - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset + 6, - sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset + 6, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -755,7 +754,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, SZ_4M, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, SZ_4M); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -788,7 +787,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -872,7 +871,7 @@ static int test_hole_first(u32 sectorsize, u32 nodesize) insert_inode_item_key(root); insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize, sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, 2 * sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, 2 * sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; @@ -894,8 +893,7 @@ static int test_hole_first(u32 sectorsize, u32 nodesize) } free_extent_map(em); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, sectorsize, - 2 * sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, sectorsize, 2 * sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; -- cgit v1.2.3-59-g8ed1b From 
c8b04030c5ec006e161969d77c07fb03a8b67112 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 2 Dec 2019 17:34:24 -0800 Subject: btrfs: simplify compressed/inline check in __extent_writepage_io() Commit 7087a9d8db88 ("btrfs: Remove extent_io_ops::writepage_end_io_hook") left this logic in a confusing state. Simplify it. Reviewed-by: Johannes Thumshirn Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index bbfb102d65b8..394beb474a69 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3492,22 +3492,11 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, */ if (compressed || block_start == EXTENT_MAP_HOLE || block_start == EXTENT_MAP_INLINE) { - /* - * end_io notification does not happen here for - * compressed extents - */ - if (!compressed) - btrfs_writepage_endio_finish_ordered(page, cur, - cur + iosize - 1, - 1); - else if (compressed) { - /* we don't want to end_page_writeback on - * a compressed extent. this happens - * elsewhere - */ + if (compressed) nr++; - } - + else + btrfs_writepage_endio_finish_ordered(page, cur, + cur + iosize - 1, 1); cur += iosize; pg_offset += iosize; continue; -- cgit v1.2.3-59-g8ed1b From 95690e58e1220e99e2a3ec9d5ebe7341fcc96745 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 2 Dec 2019 17:34:25 -0800 Subject: btrfs: remove struct find_free_extent.ram_bytes This hasn't been used since it was first introduced in commit b4bd745d1230 ("btrfs: Introduce find_free_extent_ctl structure for later rework"). Passing that to btrfs_add_reserved_bytes in find_free_extent is not strictly necessary and using the local ram_bytes instead seems cleaner. 
Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 2a7dff22c3b7..180b9b81d01a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3438,7 +3438,6 @@ btrfs_release_block_group(struct btrfs_block_group *cache, */ struct find_free_extent_ctl { /* Basic allocation info */ - u64 ram_bytes; u64 num_bytes; u64 empty_size; u64 flags; @@ -3810,7 +3809,6 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info, WARN_ON(num_bytes < fs_info->sectorsize); - ffe_ctl.ram_bytes = ram_bytes; ffe_ctl.num_bytes = num_bytes; ffe_ctl.empty_size = empty_size; ffe_ctl.flags = flags; -- cgit v1.2.3-59-g8ed1b From 0ab575c5df5e4a7eb3a80c682de1165189d888e0 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Thu, 19 Dec 2019 17:25:34 +0800 Subject: btrfs: Remove unneeded semicolon Fixes coccicheck warning: fs/btrfs/print-tree.c:320:3-4: Unneeded semicolon Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: David Sterba --- fs/btrfs/print-tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 873b6b694107..61f44e78e3c9 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -317,7 +317,7 @@ void btrfs_print_leaf(struct extent_buffer *l) print_uuid_item(l, btrfs_item_ptr_offset(l, i), btrfs_item_size_nr(l, i)); break; - }; + } } } -- cgit v1.2.3-59-g8ed1b From f6d2a5c263afca84646cf3300dc13061bedbd99e Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 17 Dec 2019 18:58:20 +0800 Subject: btrfs: tree-checker: Check leaf chunk item size Inspired by btrfs-progs github issue #208, where chunk item in chunk tree has invalid num_stripes (0). Although that can already be caught by current btrfs_check_chunk_valid(), that function doesn't really check item size as it needs to handle chunk item in super block sys_chunk_array(). This patch will add two extra checks for chunk items in chunk tree: - Basic chunk item size If the item is smaller than btrfs_chunk (which already contains one stripe), exit right now as reading num_stripes may even go beyond eb boundary. - Item size check against num_stripes If item size doesn't match with calculated chunk size, then either the item size or the num_stripes is corrupted. Error out anyway. Reviewed-by: Josef Bacik Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 97f3520b8d98..16b7b5408bd4 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -738,6 +738,44 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf, return 0; } +/* + * Enhanced version of chunk item checker. + * + * The common btrfs_check_chunk_valid() doesn't check item size since it needs + * to work on super block sys_chunk_array which doesn't have full item ptr. 
+ */ +static int check_leaf_chunk_item(struct extent_buffer *leaf, + struct btrfs_chunk *chunk, + struct btrfs_key *key, int slot) +{ + int num_stripes; + + if (btrfs_item_size_nr(leaf, slot) < sizeof(struct btrfs_chunk)) { + chunk_err(leaf, chunk, key->offset, + "invalid chunk item size: have %u expect [%zu, %u)", + btrfs_item_size_nr(leaf, slot), + sizeof(struct btrfs_chunk), + BTRFS_LEAF_DATA_SIZE(leaf->fs_info)); + return -EUCLEAN; + } + + num_stripes = btrfs_chunk_num_stripes(leaf, chunk); + /* Let btrfs_check_chunk_valid() handle this error type */ + if (num_stripes == 0) + goto out; + + if (btrfs_chunk_item_size(num_stripes) != + btrfs_item_size_nr(leaf, slot)) { + chunk_err(leaf, chunk, key->offset, + "invalid chunk item size: have %u expect %lu", + btrfs_item_size_nr(leaf, slot), + btrfs_chunk_item_size(num_stripes)); + return -EUCLEAN; + } +out: + return btrfs_check_chunk_valid(leaf, chunk, key->offset); +} + __printf(3, 4) __cold static void dev_item_err(const struct extent_buffer *eb, int slot, @@ -1384,7 +1422,7 @@ static int check_leaf_item(struct extent_buffer *leaf, break; case BTRFS_CHUNK_ITEM_KEY: chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); - ret = btrfs_check_chunk_valid(leaf, chunk, key->offset); + ret = check_leaf_chunk_item(leaf, chunk, key, slot); break; case BTRFS_DEV_ITEM_KEY: ret = check_dev_item(leaf, key, slot); -- cgit v1.2.3-59-g8ed1b From c3053ebb0b8044d8360db0f6717f8fdcbde8c370 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Mon, 9 Dec 2019 18:54:32 +0800 Subject: btrfs: tree-checker: Clean up fs_info parameter from error message wrapper The @fs_info parameter can be extracted from extent_buffer structure, and there are already some wrappers getting rid of the @fs_info parameter. Reviewed-by: Nikolay Borisov Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 16b7b5408bd4..b6f3dc713d5e 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -839,7 +839,7 @@ static int check_dev_item(struct extent_buffer *leaf, } /* Inode item error output has the same format as dir_item_err() */ -#define inode_item_err(fs_info, eb, slot, fmt, ...) \ +#define inode_item_err(eb, slot, fmt, ...) 
\ dir_item_err(eb, slot, fmt, __VA_ARGS__) static int check_inode_item(struct extent_buffer *leaf, @@ -864,7 +864,7 @@ static int check_inode_item(struct extent_buffer *leaf, return -EUCLEAN; } if (key->offset != 0) { - inode_item_err(fs_info, leaf, slot, + inode_item_err(leaf, slot, "invalid key offset: has %llu expect 0", key->offset); return -EUCLEAN; @@ -873,7 +873,7 @@ static int check_inode_item(struct extent_buffer *leaf, /* Here we use super block generation + 1 to handle log tree */ if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) { - inode_item_err(fs_info, leaf, slot, + inode_item_err(leaf, slot, "invalid inode generation: has %llu expect (0, %llu]", btrfs_inode_generation(leaf, iitem), super_gen + 1); @@ -881,7 +881,7 @@ static int check_inode_item(struct extent_buffer *leaf, } /* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */ if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) { - inode_item_err(fs_info, leaf, slot, + inode_item_err(leaf, slot, "invalid inode generation: has %llu expect [0, %llu]", btrfs_inode_transid(leaf, iitem), super_gen + 1); return -EUCLEAN; @@ -894,7 +894,7 @@ static int check_inode_item(struct extent_buffer *leaf, */ mode = btrfs_inode_mode(leaf, iitem); if (mode & ~valid_mask) { - inode_item_err(fs_info, leaf, slot, + inode_item_err(leaf, slot, "unknown mode bit detected: 0x%x", mode & ~valid_mask); return -EUCLEAN; @@ -907,20 +907,20 @@ static int check_inode_item(struct extent_buffer *leaf, */ if (!has_single_bit_set(mode & S_IFMT)) { if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) { - inode_item_err(fs_info, leaf, slot, + inode_item_err(leaf, slot, "invalid mode: has 0%o expect valid S_IF* bit(s)", mode & S_IFMT); return -EUCLEAN; } } if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) { - inode_item_err(fs_info, leaf, slot, + inode_item_err(leaf, slot, "invalid nlink: has %u expect no more than 1 for dir", btrfs_inode_nlink(leaf, iitem)); return -EUCLEAN; } if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) { - inode_item_err(fs_info, leaf, slot, + inode_item_err(leaf, slot, "unknown flags detected: 0x%llx", btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK); @@ -1340,8 +1340,8 @@ static int check_extent_data_ref(struct extent_buffer *leaf, return 0; } -#define inode_ref_err(fs_info, eb, slot, fmt, args...) \ - inode_item_err(fs_info, eb, slot, fmt, ##args) +#define inode_ref_err(eb, slot, fmt, args...) 
\ + inode_item_err(eb, slot, fmt, ##args) static int check_inode_ref(struct extent_buffer *leaf, struct btrfs_key *key, struct btrfs_key *prev_key, int slot) @@ -1354,7 +1354,7 @@ static int check_inode_ref(struct extent_buffer *leaf, return -EUCLEAN; /* namelen can't be 0, so item_size == sizeof() is also invalid */ if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) { - inode_ref_err(fs_info, leaf, slot, + inode_ref_err(leaf, slot, "invalid item size, have %u expect (%zu, %u)", btrfs_item_size_nr(leaf, slot), sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info)); @@ -1367,7 +1367,7 @@ static int check_inode_ref(struct extent_buffer *leaf, u16 namelen; if (ptr + sizeof(iref) > end) { - inode_ref_err(fs_info, leaf, slot, + inode_ref_err(leaf, slot, "inode ref overflow, ptr %lu end %lu inode_ref_size %zu", ptr, end, sizeof(iref)); return -EUCLEAN; @@ -1376,7 +1376,7 @@ static int check_inode_ref(struct extent_buffer *leaf, iref = (struct btrfs_inode_ref *)ptr; namelen = btrfs_inode_ref_name_len(leaf, iref); if (ptr + sizeof(*iref) + namelen > end) { - inode_ref_err(fs_info, leaf, slot, + inode_ref_err(leaf, slot, "inode ref overflow, ptr %lu end %lu namelen %u", ptr, end, namelen); return -EUCLEAN; -- cgit v1.2.3-59-g8ed1b From c23c77b097dc52d5c166720467a62058e4a3c058 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Mon, 9 Dec 2019 18:54:33 +0800 Subject: btrfs: tree-checker: Refactor inode key check into seperate function Inode key check is not as easy as several lines, and it will be called in more than one location (INODE_ITEM check and DIR_ITEM/DIR_INDEX/XATTR_ITEM location key check). So here refactor such check into check_inode_key(). And add extra checks for XATTR_ITEM. Reviewed-by: Nikolay Borisov Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 78 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 60 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index b6f3dc713d5e..285c362b3121 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -373,6 +373,61 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key, return 0; } +/* Inode item error output has the same format as dir_item_err() */ +#define inode_item_err(eb, slot, fmt, ...) 
\ + dir_item_err(eb, slot, fmt, __VA_ARGS__) + +static int check_inode_key(struct extent_buffer *leaf, struct btrfs_key *key, + int slot) +{ + struct btrfs_key item_key; + bool is_inode_item; + + btrfs_item_key_to_cpu(leaf, &item_key, slot); + is_inode_item = (item_key.type == BTRFS_INODE_ITEM_KEY); + + /* For XATTR_ITEM, location key should be all 0 */ + if (item_key.type == BTRFS_XATTR_ITEM_KEY) { + if (key->type != 0 || key->objectid != 0 || key->offset != 0) + return -EUCLEAN; + return 0; + } + + if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID || + key->objectid > BTRFS_LAST_FREE_OBJECTID) && + key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID && + key->objectid != BTRFS_FREE_INO_OBJECTID) { + if (is_inode_item) { + generic_err(leaf, slot, + "invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu", + key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID, + BTRFS_FIRST_FREE_OBJECTID, + BTRFS_LAST_FREE_OBJECTID, + BTRFS_FREE_INO_OBJECTID); + } else { + dir_item_err(leaf, slot, +"invalid location key objectid: has %llu expect %llu or [%llu, %llu] or %llu", + key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID, + BTRFS_FIRST_FREE_OBJECTID, + BTRFS_LAST_FREE_OBJECTID, + BTRFS_FREE_INO_OBJECTID); + } + return -EUCLEAN; + } + if (key->offset != 0) { + if (is_inode_item) + inode_item_err(leaf, slot, + "invalid key offset: has %llu expect 0", + key->offset); + else + dir_item_err(leaf, slot, + "invalid location key offset:has %llu expect 0", + key->offset); + return -EUCLEAN; + } + return 0; +} + static int check_dir_item(struct extent_buffer *leaf, struct btrfs_key *key, struct btrfs_key *prev_key, int slot) @@ -850,25 +905,12 @@ static int check_inode_item(struct extent_buffer *leaf, u64 super_gen = btrfs_super_generation(fs_info->super_copy); u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777); u32 mode; + int ret; + + ret = check_inode_key(leaf, key, slot); + if (ret < 0) + return ret; - if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID || - key->objectid > BTRFS_LAST_FREE_OBJECTID) && - key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID && - key->objectid != BTRFS_FREE_INO_OBJECTID) { - generic_err(leaf, slot, - "invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu", - key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID, - BTRFS_FIRST_FREE_OBJECTID, - BTRFS_LAST_FREE_OBJECTID, - BTRFS_FREE_INO_OBJECTID); - return -EUCLEAN; - } - if (key->offset != 0) { - inode_item_err(leaf, slot, - "invalid key offset: has %llu expect 0", - key->offset); - return -EUCLEAN; - } iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item); /* Here we use super block generation + 1 to handle log tree */ -- cgit v1.2.3-59-g8ed1b From 57a0e67491914f6187b1847ee3b1a01a556b9238 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Mon, 9 Dec 2019 18:54:34 +0800 Subject: btrfs: tree-checker: Refactor root key check into separate function ROOT_ITEM key check itself is not as simple as single line check, and will be reused for both ROOT_ITEM and DIR_ITEM/DIR_INDEX location key check, so refactor such check into check_root_key(). Also since we are here, fix a comment error about ROOT_ITEM offset, which is transid of snapshot creation, not some "older kernel behavior". 
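For illustration (the key values here are made up, not from the patch): a
plain subvolume root is keyed like

	(256 ROOT_ITEM 0)

while a snapshot of it created at transaction 12345 is keyed

	(257 ROOT_ITEM 12345)

so a non-zero offset on a ROOT_ITEM is perfectly valid; only the reloc tree
case, whose offset must name a valid tree being relocated, is rejected when
the offset is zero.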
Reviewed-by: Nikolay Borisov Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 62 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 47 insertions(+), 15 deletions(-) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 285c362b3121..9fc438be4bd9 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -428,6 +428,49 @@ static int check_inode_key(struct extent_buffer *leaf, struct btrfs_key *key, return 0; } +static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key, + int slot) +{ + struct btrfs_key item_key; + bool is_root_item; + + btrfs_item_key_to_cpu(leaf, &item_key, slot); + is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY); + + /* No such tree id */ + if (key->objectid == 0) { + if (is_root_item) + generic_err(leaf, slot, "invalid root id 0"); + else + dir_item_err(leaf, slot, + "invalid location key root id 0"); + return -EUCLEAN; + } + + /* DIR_ITEM/INDEX/INODE_REF is not allowed to point to non-fs trees */ + if (!is_fstree(key->objectid) && !is_root_item) { + dir_item_err(leaf, slot, + "invalid location key objectid, have %llu expect [%llu, %llu]", + key->objectid, BTRFS_FIRST_FREE_OBJECTID, + BTRFS_LAST_FREE_OBJECTID); + return -EUCLEAN; + } + + /* + * ROOT_ITEM with non-zero offset means this is a snapshot, created at + * @offset transid. + * Furthermore, for location key in DIR_ITEM, its offset is always -1. + * + * So here we only check offset for reloc tree whose key->offset must + * be a valid tree. + */ + if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) { + generic_err(leaf, slot, "invalid root id 0 for reloc tree"); + return -EUCLEAN; + } + return 0; +} + static int check_dir_item(struct extent_buffer *leaf, struct btrfs_key *key, struct btrfs_key *prev_key, int slot) @@ -978,22 +1021,11 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, struct btrfs_root_item ri; const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY | BTRFS_ROOT_SUBVOL_DEAD; + int ret; - /* No such tree id */ - if (key->objectid == 0) { - generic_err(leaf, slot, "invalid root id 0"); - return -EUCLEAN; - } - - /* - * Some older kernel may create ROOT_ITEM with non-zero offset, so here - * we only check offset for reloc tree whose key->offset must be a - * valid tree. - */ - if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) { - generic_err(leaf, slot, "invalid root id 0 for reloc tree"); - return -EUCLEAN; - } + ret = check_root_key(leaf, key, slot); + if (ret < 0) + return ret; if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) { generic_err(leaf, slot, -- cgit v1.2.3-59-g8ed1b From 147a097cf035ef7225542605f4a61d9fab70dc84 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Mon, 9 Dec 2019 18:54:35 +0800 Subject: btrfs: tree-checker: Verify location key for DIR_ITEM/DIR_INDEX [PROBLEM] There is a user report in the mail list, showing the following corrupted tree blocks: item 62 key (486836 DIR_ITEM 2543451757) itemoff 6273 itemsize 74 location key (4065004 INODE_ITEM 1073741824) type FILE transid 21397 data_len 0 name_len 44 name: FILENAME Note that location key, its offset should be 0 for all INODE_ITEMS. This caused failed lookup of the inode. [CAUSE] That offending value, 1073741824, is 0x40000000. So this looks like a memory bit flip. [FIX] This patch will enhance tree-checker to check location key of DIR_INDEX/DIR_ITEM/XATTR_ITEM. 
There are several different combinations needs to check: - item_key.type == DIR_INDEX/DIR_ITEM * location_key.type == BTRFS_INODE_ITEM_KEY This location_key should follow the check in inode_item check. * location_key.type == BTRFS_ROOT_ITEM_KEY Despite the existing check, DIR_INDEX/DIR_ITEM can only points to subvolume trees. * All other keys are not allowed. - item_key.type == XATTR_ITEM location_key should be all 0. Reported-by: Mike Gilbert Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 9fc438be4bd9..a92f8a6dd192 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -484,12 +484,14 @@ static int check_dir_item(struct extent_buffer *leaf, return -EUCLEAN; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); while (cur < item_size) { + struct btrfs_key location_key; u32 name_len; u32 data_len; u32 max_name_len; u32 total_size; u32 name_hash; u8 dir_type; + int ret; /* header itself should not cross item boundary */ if (cur + sizeof(*di) > item_size) { @@ -499,6 +501,25 @@ static int check_dir_item(struct extent_buffer *leaf, return -EUCLEAN; } + /* Location key check */ + btrfs_dir_item_key_to_cpu(leaf, di, &location_key); + if (location_key.type == BTRFS_ROOT_ITEM_KEY) { + ret = check_root_key(leaf, &location_key, slot); + if (ret < 0) + return ret; + } else if (location_key.type == BTRFS_INODE_ITEM_KEY || + location_key.type == 0) { + ret = check_inode_key(leaf, &location_key, slot); + if (ret < 0) + return ret; + } else { + dir_item_err(leaf, slot, + "invalid location key type, have %u, expect %u or %u", + location_key.type, BTRFS_ROOT_ITEM_KEY, + BTRFS_INODE_ITEM_KEY); + return -EUCLEAN; + } + /* dir type check */ dir_type = btrfs_dir_type(leaf, di); if (dir_type >= BTRFS_FT_MAX) { -- cgit v1.2.3-59-g8ed1b From e837dfde15a49c97dcbb059757d96c71e9e7bd54 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:10 -0800 Subject: bitmap: genericize percpu bitmap region iterators Bitmaps are fairly popular for their space efficiency, but we don't have generic iterators available. Make percpu's bitmap region iterators available to everyone. Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/linux/bitmap.h | 35 +++++++++++++++++++++++++++++ mm/percpu.c | 61 +++++++++++++------------------------------------- 2 files changed, 51 insertions(+), 45 deletions(-) diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index ff335b22f23c..cb63feb3cfbe 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -456,6 +456,41 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen, return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); } +static inline void bitmap_next_clear_region(unsigned long *bitmap, + unsigned int *rs, unsigned int *re, + unsigned int end) +{ + *rs = find_next_zero_bit(bitmap, end, *rs); + *re = find_next_bit(bitmap, end, *rs + 1); +} + +static inline void bitmap_next_set_region(unsigned long *bitmap, + unsigned int *rs, unsigned int *re, + unsigned int end) +{ + *rs = find_next_bit(bitmap, end, *rs); + *re = find_next_zero_bit(bitmap, end, *rs + 1); +} + +/* + * Bitmap region iterators. Iterates over the bitmap between [@start, @end). + * @rs and @re should be integer variables and will be set to start and end + * index of the current clear or set region. 
+ */ +#define bitmap_for_each_clear_region(bitmap, rs, re, start, end) \ + for ((rs) = (start), \ + bitmap_next_clear_region((bitmap), &(rs), &(re), (end)); \ + (rs) < (re); \ + (rs) = (re) + 1, \ + bitmap_next_clear_region((bitmap), &(rs), &(re), (end))) + +#define bitmap_for_each_set_region(bitmap, rs, re, start, end) \ + for ((rs) = (start), \ + bitmap_next_set_region((bitmap), &(rs), &(re), (end)); \ + (rs) < (re); \ + (rs) = (re) + 1, \ + bitmap_next_set_region((bitmap), &(rs), &(re), (end))) + /** * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap. * @n: u64 value diff --git a/mm/percpu.c b/mm/percpu.c index 7e06a1e58720..e9844086b236 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -270,33 +270,6 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, pcpu_unit_page_offset(cpu, page_idx); } -static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end) -{ - *rs = find_next_zero_bit(bitmap, end, *rs); - *re = find_next_bit(bitmap, end, *rs + 1); -} - -static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end) -{ - *rs = find_next_bit(bitmap, end, *rs); - *re = find_next_zero_bit(bitmap, end, *rs + 1); -} - -/* - * Bitmap region iterators. Iterates over the bitmap between - * [@start, @end) in @chunk. @rs and @re should be integer variables - * and will be set to start and end index of the current free region. - */ -#define pcpu_for_each_unpop_region(bitmap, rs, re, start, end) \ - for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \ - (rs) < (re); \ - (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end))) - -#define pcpu_for_each_pop_region(bitmap, rs, re, start, end) \ - for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end)); \ - (rs) < (re); \ - (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end))) - /* * The following are helper functions to help access bitmaps and convert * between bitmap offsets to address offsets. 
@@ -732,9 +705,8 @@ static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan) } bits = 0; - pcpu_for_each_md_free_region(chunk, bit_off, bits) { + pcpu_for_each_md_free_region(chunk, bit_off, bits) pcpu_block_update(chunk_md, bit_off, bit_off + bits); - } } /** @@ -749,7 +721,7 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) { struct pcpu_block_md *block = chunk->md_blocks + index; unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); - int rs, re, start; /* region start, region end */ + unsigned int rs, re, start; /* region start, region end */ /* promote scan_hint to contig_hint */ if (block->scan_hint) { @@ -765,10 +737,9 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) block->right_free = 0; /* iterate over free areas and update the contig hints */ - pcpu_for_each_unpop_region(alloc_map, rs, re, start, - PCPU_BITMAP_BLOCK_BITS) { + bitmap_for_each_clear_region(alloc_map, rs, re, start, + PCPU_BITMAP_BLOCK_BITS) pcpu_block_update(block, rs, re); - } } /** @@ -1041,13 +1012,13 @@ static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off, static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, int *next_off) { - int page_start, page_end, rs, re; + unsigned int page_start, page_end, rs, re; page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE); page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); rs = page_start; - pcpu_next_unpop(chunk->populated, &rs, &re, page_end); + bitmap_next_clear_region(chunk->populated, &rs, &re, page_end); if (rs >= page_end) return true; @@ -1702,13 +1673,13 @@ area_found: /* populate if not all pages are already there */ if (!is_atomic) { - int page_start, page_end, rs, re; + unsigned int page_start, page_end, rs, re; page_start = PFN_DOWN(off); page_end = PFN_UP(off + size); - pcpu_for_each_unpop_region(chunk->populated, rs, re, - page_start, page_end) { + bitmap_for_each_clear_region(chunk->populated, rs, re, + page_start, page_end) { WARN_ON(chunk->immutable); ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); @@ -1858,10 +1829,10 @@ static void pcpu_balance_workfn(struct work_struct *work) spin_unlock_irq(&pcpu_lock); list_for_each_entry_safe(chunk, next, &to_free, list) { - int rs, re; + unsigned int rs, re; - pcpu_for_each_pop_region(chunk->populated, rs, re, 0, - chunk->nr_pages) { + bitmap_for_each_set_region(chunk->populated, rs, re, 0, + chunk->nr_pages) { pcpu_depopulate_chunk(chunk, rs, re); spin_lock_irq(&pcpu_lock); pcpu_chunk_depopulated(chunk, rs, re); @@ -1893,7 +1864,7 @@ retry_pop: } for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { - int nr_unpop = 0, rs, re; + unsigned int nr_unpop = 0, rs, re; if (!nr_to_pop) break; @@ -1910,9 +1881,9 @@ retry_pop: continue; /* @chunk can't go away while pcpu_alloc_mutex is held */ - pcpu_for_each_unpop_region(chunk->populated, rs, re, 0, - chunk->nr_pages) { - int nr = min(re - rs, nr_to_pop); + bitmap_for_each_clear_region(chunk->populated, rs, re, 0, + chunk->nr_pages) { + int nr = min_t(int, re - rs, nr_to_pop); ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); if (!ret) { -- cgit v1.2.3-59-g8ed1b From 46b27f5059e6ce7a7e3805d53144b37897723e3b Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:11 -0800 Subject: btrfs: rename DISCARD mount option to to DISCARD_SYNC This series introduces async discard which will use the flag DISCARD_ASYNC, so rename the original flag to DISCARD_SYNC as it is synchronously done in transaction 
commit. Reviewed-by: Josef Bacik Reviewed-by: Johannes Thumshirn Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 2 +- fs/btrfs/ctree.h | 2 +- fs/btrfs/extent-tree.c | 2 +- fs/btrfs/inode.c | 2 +- fs/btrfs/super.c | 8 ++++---- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 66fa39632cde..be1938dc94fd 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1349,7 +1349,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) spin_unlock(&space_info->lock); /* DISCARD can flip during remount */ - trimming = btrfs_test_opt(fs_info, DISCARD); + trimming = btrfs_test_opt(fs_info, DISCARD_SYNC); /* Implicit trim during transaction commit. */ if (trimming) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e416ef6c9415..2f6c21ea84af 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1170,7 +1170,7 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info) #define BTRFS_MOUNT_FLUSHONCOMMIT (1 << 7) #define BTRFS_MOUNT_SSD_SPREAD (1 << 8) #define BTRFS_MOUNT_NOSSD (1 << 9) -#define BTRFS_MOUNT_DISCARD (1 << 10) +#define BTRFS_MOUNT_DISCARD_SYNC (1 << 10) #define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11) #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 180b9b81d01a..1a8bf943c3e7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2923,7 +2923,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) break; } - if (btrfs_test_opt(fs_info, DISCARD)) + if (btrfs_test_opt(fs_info, DISCARD_SYNC)) ret = btrfs_discard_extent(fs_info, start, end + 1 - start, NULL); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bf5d7ce358b5..6d2bb58d277a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2584,7 +2584,7 @@ out: * Discard the range before returning it back to the * free space pool */ - if (ret && btrfs_test_opt(fs_info, DISCARD)) + if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC)) btrfs_discard_extent(fs_info, ordered_extent->disk_bytenr, ordered_extent->disk_num_bytes, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f452a94abdc3..08ac6a7a67f0 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -695,11 +695,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, info->metadata_ratio); break; case Opt_discard: - btrfs_set_and_info(info, DISCARD, - "turning on discard"); + btrfs_set_and_info(info, DISCARD_SYNC, + "turning on sync discard"); break; case Opt_nodiscard: - btrfs_clear_and_info(info, DISCARD, + btrfs_clear_and_info(info, DISCARD_SYNC, "turning off discard"); break; case Opt_space_cache: @@ -1322,7 +1322,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) seq_puts(seq, ",nologreplay"); if (btrfs_test_opt(info, FLUSHONCOMMIT)) seq_puts(seq, ",flushoncommit"); - if (btrfs_test_opt(info, DISCARD)) + if (btrfs_test_opt(info, DISCARD_SYNC)) seq_puts(seq, ",discard"); if (!(info->sb->s_flags & SB_POSIXACL)) seq_puts(seq, ",noacl"); -- cgit v1.2.3-59-g8ed1b From a7ccb255852413dd59263e551fd0ef13f76fc9b9 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:12 -0800 Subject: btrfs: keep track of which extents have been discarded Async discard will use the free space cache as backing knowledge for which extents to discard. 
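As a rough illustration (a userspace model, not the kernel code itself), the state this series attaches to free space entries can be condensed to an enum plus two small predicates. The enum and the trimmed() helper mirror what this patch adds to free-space-cache.h below; the merge predicate paraphrases the try_merge_free_space() checks further down:

#include <stdbool.h>
#include <stdint.h>

/* Trim state carried by every free space entry (mirrors the enum added
 * to free-space-cache.h in this patch). */
enum btrfs_trim_state {
	BTRFS_TRIM_STATE_UNTRIMMED,
	BTRFS_TRIM_STATE_TRIMMED,
};

struct free_space_entry {
	uint64_t offset;
	uint64_t bytes;
	enum btrfs_trim_state trim_state;
};

static bool entry_trimmed(const struct free_space_entry *e)
{
	return e->trim_state == BTRFS_TRIM_STATE_TRIMMED;
}

/* Untrimmed space may merge with anything (the merged entry simply
 * stays untrimmed); trimmed space only merges with trimmed neighbors,
 * so a merge never hides bytes that still need a discard. */
static bool can_merge(const struct free_space_entry *incoming,
		      const struct free_space_entry *neighbor)
{
	return !entry_trimmed(incoming) || entry_trimmed(neighbor);
}

The asymmetry in can_merge() is deliberate: coalescing is favored for untrimmed space, while trimmed space never silently absorbs untrimmed bytes.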
This patch plumbs knowledge about which extents need to be discarded into the free space cache from unpin_extent_range(). An untrimmed extent can merge with everything as this is a new region. Absorbing trimmed extents is a tradeoff to for greater coalescing which makes life better for find_free_extent(). Additionally, it seems the size of a trim isn't as problematic as the trim io itself. When reading in the free space cache from disk, if sync is set, mark all extents as trimmed. The current code ensures at transaction commit that all free space is trimmed when sync is set, so this reflects that. Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/free-space-cache.c | 63 ++++++++++++++++++++++++++++++++++++++++----- fs/btrfs/free-space-cache.h | 17 +++++++++++- fs/btrfs/inode-map.c | 13 +++++----- 3 files changed, 79 insertions(+), 14 deletions(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 3283da419200..669809d5214e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -752,6 +752,14 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, goto free_cache; } + /* + * Sync discard ensures that the free space cache is always + * trimmed. So when reading this in, the state should reflect + * that. + */ + if (btrfs_test_opt(fs_info, DISCARD_SYNC)) + e->trim_state = BTRFS_TRIM_STATE_TRIMMED; + if (!e->bytes) { kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache; @@ -2161,6 +2169,22 @@ out: return ret; } +/* + * Free space merging rules: + * 1) Merge trimmed areas together + * 2) Let untrimmed areas coalesce with trimmed areas + * 3) Always pull neighboring regions from bitmaps + * + * The above rules are for when we merge free space based on btrfs_trim_state. + * Rules 2 and 3 are subtle because they are suboptimal, but are done for the + * same reason: to promote larger extent regions which makes life easier for + * find_free_extent(). Rule 2 enables coalescing based on the common path + * being returning free space from btrfs_finish_extent_commit(). So when free + * space is trimmed, it will prevent aggregating trimmed new region and + * untrimmed regions in the rb_tree. Rule 3 is purely to obtain larger extents + * and provide find_free_extent() with the largest extents possible hoping for + * the reuse path. + */ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, bool update_stat) { @@ -2169,6 +2193,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, bool merged = false; u64 offset = info->offset; u64 bytes = info->bytes; + const bool is_trimmed = btrfs_free_space_trimmed(info); /* * first we want to see if there is free space adjacent to the range we @@ -2182,7 +2207,9 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, else left_info = tree_search_offset(ctl, offset - 1, 0, 0); - if (right_info && !right_info->bitmap) { + /* See try_merge_free_space() comment. */ + if (right_info && !right_info->bitmap && + (!is_trimmed || btrfs_free_space_trimmed(right_info))) { if (update_stat) unlink_free_space(ctl, right_info); else @@ -2192,8 +2219,10 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, merged = true; } + /* See try_merge_free_space() comment. 
*/ if (left_info && !left_info->bitmap && - left_info->offset + left_info->bytes == offset) { + left_info->offset + left_info->bytes == offset && + (!is_trimmed || btrfs_free_space_trimmed(left_info))) { if (update_stat) unlink_free_space(ctl, left_info); else @@ -2229,6 +2258,10 @@ static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl, bytes = (j - i) * ctl->unit; info->bytes += bytes; + /* See try_merge_free_space() comment. */ + if (!btrfs_free_space_trimmed(bitmap)) + info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; + if (update_stat) bitmap_clear_bits(ctl, bitmap, end, bytes); else @@ -2282,6 +2315,10 @@ static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl, info->offset -= bytes; info->bytes += bytes; + /* See try_merge_free_space() comment. */ + if (!btrfs_free_space_trimmed(bitmap)) + info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; + if (update_stat) bitmap_clear_bits(ctl, bitmap, info->offset, bytes); else @@ -2331,7 +2368,8 @@ static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl, int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, struct btrfs_free_space_ctl *ctl, - u64 offset, u64 bytes) + u64 offset, u64 bytes, + enum btrfs_trim_state trim_state) { struct btrfs_free_space *info; int ret = 0; @@ -2342,6 +2380,7 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, info->offset = offset; info->bytes = bytes; + info->trim_state = trim_state; RB_CLEAR_NODE(&info->offset_index); spin_lock(&ctl->tree_lock); @@ -2387,9 +2426,14 @@ out: int btrfs_add_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size) { + enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED; + + if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC)) + trim_state = BTRFS_TRIM_STATE_TRIMMED; + return __btrfs_add_free_space(block_group->fs_info, block_group->free_space_ctl, - bytenr, size); + bytenr, size, trim_state); } int btrfs_remove_free_space(struct btrfs_block_group *block_group, @@ -2464,8 +2508,10 @@ again: } spin_unlock(&ctl->tree_lock); - ret = btrfs_add_free_space(block_group, offset + bytes, - old_end - (offset + bytes)); + ret = __btrfs_add_free_space(block_group->fs_info, ctl, + offset + bytes, + old_end - (offset + bytes), + info->trim_state); WARN_ON(ret); goto out; } @@ -2634,6 +2680,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, u64 ret = 0; u64 align_gap = 0; u64 align_gap_len = 0; + enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED; spin_lock(&ctl->tree_lock); entry = find_free_space(ctl, &offset, &bytes_search, @@ -2650,6 +2697,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, unlink_free_space(ctl, entry); align_gap_len = offset - entry->offset; align_gap = entry->offset; + align_gap_trim_state = entry->trim_state; entry->offset = offset + bytes; WARN_ON(entry->bytes < bytes + align_gap_len); @@ -2665,7 +2713,8 @@ out: if (align_gap_len) __btrfs_add_free_space(block_group->fs_info, ctl, - align_gap, align_gap_len); + align_gap, align_gap_len, + align_gap_trim_state); return ret; } diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index ba9a23241101..66c073f854dc 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -6,6 +6,14 @@ #ifndef BTRFS_FREE_SPACE_CACHE_H #define BTRFS_FREE_SPACE_CACHE_H +/* + * This is the trim state of an extent or bitmap. 
+ */ +enum btrfs_trim_state { + BTRFS_TRIM_STATE_UNTRIMMED, + BTRFS_TRIM_STATE_TRIMMED, +}; + struct btrfs_free_space { struct rb_node offset_index; u64 offset; @@ -13,8 +21,14 @@ struct btrfs_free_space { u64 max_extent_size; unsigned long *bitmap; struct list_head list; + enum btrfs_trim_state trim_state; }; +static inline bool btrfs_free_space_trimmed(struct btrfs_free_space *info) +{ + return (info->trim_state == BTRFS_TRIM_STATE_TRIMMED); +} + struct btrfs_free_space_ctl { spinlock_t tree_lock; struct rb_root free_space_offset; @@ -83,7 +97,8 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group); int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, struct btrfs_free_space_ctl *ctl, - u64 bytenr, u64 size); + u64 bytenr, u64 size, + enum btrfs_trim_state trim_state); int btrfs_add_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size); int btrfs_remove_free_space(struct btrfs_block_group *block_group, diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 37345fb6191d..d5c9c69d8263 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -107,7 +107,7 @@ again: if (last != (u64)-1 && last + 1 != key.objectid) { __btrfs_add_free_space(fs_info, ctl, last + 1, - key.objectid - last - 1); + key.objectid - last - 1, 0); wake_up(&root->ino_cache_wait); } @@ -118,7 +118,7 @@ next: if (last < root->highest_objectid - 1) { __btrfs_add_free_space(fs_info, ctl, last + 1, - root->highest_objectid - last - 1); + root->highest_objectid - last - 1, 0); } spin_lock(&root->ino_cache_lock); @@ -175,7 +175,8 @@ static void start_caching(struct btrfs_root *root) ret = btrfs_find_free_objectid(root, &objectid); if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) { __btrfs_add_free_space(fs_info, ctl, objectid, - BTRFS_LAST_FREE_OBJECTID - objectid + 1); + BTRFS_LAST_FREE_OBJECTID - objectid + 1, + 0); wake_up(&root->ino_cache_wait); } @@ -221,7 +222,7 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid) return; again: if (root->ino_cache_state == BTRFS_CACHE_FINISHED) { - __btrfs_add_free_space(fs_info, pinned, objectid, 1); + __btrfs_add_free_space(fs_info, pinned, objectid, 1, 0); } else { down_write(&fs_info->commit_root_sem); spin_lock(&root->ino_cache_lock); @@ -234,7 +235,7 @@ again: start_caching(root); - __btrfs_add_free_space(fs_info, pinned, objectid, 1); + __btrfs_add_free_space(fs_info, pinned, objectid, 1, 0); up_write(&fs_info->commit_root_sem); } @@ -281,7 +282,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root) spin_unlock(rbroot_lock); if (count) __btrfs_add_free_space(root->fs_info, ctl, - info->offset, count); + info->offset, count, 0); kmem_cache_free(btrfs_free_space_cachep, info); } } -- cgit v1.2.3-59-g8ed1b From da080fe1bad4777b02f6a3db42823a8797aadbca Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:13 -0800 Subject: btrfs: keep track of free space bitmap trim status cleanliness There is a cap in btrfs in the amount of free extents that a block group can have. When it surpasses that threshold, future extents are placed into bitmaps. Instead of keeping track of if a certain bit is trimmed or not in a second bitmap, keep track of the relative state of the bitmap. With async discard, trimming bitmaps becomes a more frequent operation. As a trade off with simplicity, we keep track of if discarding a bitmap is in progress. If we fully scan a bitmap and trim as necessary, the bitmap is marked clean. 
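A minimal model of that state machine (a simplified userspace sketch, not the btrfs code; the real transitions live in trim_bitmaps(), add_bytes_to_bitmap() and end_trimming_bitmap() in the hunks below) might look like:

enum trim_state { UNTRIMMED, TRIMMING, TRIMMED };

struct bitmap_entry {
	unsigned long long start;	/* first byte covered by this bitmap */
	enum trim_state trim_state;
};

/* A full scan is only guaranteed when trimming starts at the bitmap's
 * first byte, so TRIMMING is set only in that case. */
static void begin_trimming(struct bitmap_entry *b, unsigned long long trim_start)
{
	if (trim_start == b->start)
		b->trim_state = TRIMMING;
}

/* Untrimmed free space added while a trim is in flight invalidates the
 * pass; trimmed space being returned does not. */
static void add_free_space(struct bitmap_entry *b, enum trim_state added)
{
	if (added == UNTRIMMED)
		b->trim_state = UNTRIMMED;
}

/* Only a scan that reached the end with TRIMMING still intact may mark
 * the bitmap clean. */
static void end_trimming(struct bitmap_entry *b)
{
	if (b->trim_state == TRIMMING)
		b->trim_state = TRIMMED;
}

Marking a bitmap clean only after an uninterrupted full pass is what lets a single per-entry state stand in for a second tracking bitmap.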
This has some caveats as the min block size may skip over regions deemed too small. But this should be a reasonable trade off rather than keeping a second bitmap and making allocation paths more complex. The downside is we may overtrim, but ideally the min block size should prevent us from doing that too often and getting stuck trimming pathological cases. BTRFS_TRIM_STATE_TRIMMING is added to indicate a bitmap is in the process of being trimmed. If additional free space is added to that bitmap, the bit is cleared. A bitmap will be marked BTRFS_TRIM_STATE_TRIMMED if the trimming code was able to reach the end of it and the former is still set. Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/free-space-cache.c | 89 ++++++++++++++++++++++++++++++++++++++++----- fs/btrfs/free-space-cache.h | 12 ++++++ 2 files changed, 92 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 669809d5214e..063f4db44024 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1979,11 +1979,18 @@ again: static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, - u64 bytes) + u64 bytes, enum btrfs_trim_state trim_state) { u64 bytes_to_set = 0; u64 end; + /* + * This is a tradeoff to make bitmap trim state minimal. We mark the + * whole bitmap untrimmed if at any point we add untrimmed regions. + */ + if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) + info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; + end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); bytes_to_set = min(end - offset, bytes); @@ -2058,10 +2065,12 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_block_group *block_group = NULL; int added = 0; u64 bytes, offset, bytes_added; + enum btrfs_trim_state trim_state; int ret; bytes = info->bytes; offset = info->offset; + trim_state = info->trim_state; if (!ctl->op->use_bitmap(ctl, info)) return 0; @@ -2096,8 +2105,8 @@ again: } if (entry->offset == offset_to_bitmap(ctl, offset)) { - bytes_added = add_bytes_to_bitmap(ctl, entry, - offset, bytes); + bytes_added = add_bytes_to_bitmap(ctl, entry, offset, + bytes, trim_state); bytes -= bytes_added; offset += bytes_added; } @@ -2116,7 +2125,8 @@ no_cluster_bitmap: goto new_bitmap; } - bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); + bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes, + trim_state); bytes -= bytes_added; offset += bytes_added; added = 0; @@ -2150,6 +2160,7 @@ new_bitmap: /* allocate the bitmap */ info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS); + info->trim_state = BTRFS_TRIM_STATE_TRIMMED; spin_lock(&ctl->tree_lock); if (!info->bitmap) { ret = -ENOMEM; @@ -3324,6 +3335,37 @@ out: return ret; } +/* + * If we break out of trimming a bitmap prematurely, we should reset the + * trimming bit. In a rather contrieved case, it's possible to race here so + * reset the state to BTRFS_TRIM_STATE_UNTRIMMED. 
+ * + * start = start of bitmap + * end = near end of bitmap + * + * Thread 1: Thread 2: + * trim_bitmaps(start) + * trim_bitmaps(end) + * end_trimming_bitmap() + * reset_trimming_bitmap() + */ +static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset) +{ + struct btrfs_free_space *entry; + + spin_lock(&ctl->tree_lock); + entry = tree_search_offset(ctl, offset, 1, 0); + if (entry) + entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; + spin_unlock(&ctl->tree_lock); +} + +static void end_trimming_bitmap(struct btrfs_free_space *entry) +{ + if (btrfs_free_space_trimming_bitmap(entry)) + entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; +} + static int trim_bitmaps(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 end, u64 minlen) { @@ -3348,16 +3390,33 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, } entry = tree_search_offset(ctl, offset, 1, 0); - if (!entry) { + if (!entry || btrfs_free_space_trimmed(entry)) { spin_unlock(&ctl->tree_lock); mutex_unlock(&ctl->cache_writeout_mutex); next_bitmap = true; goto next; } + /* + * Async discard bitmap trimming begins at by setting the start + * to be key.objectid and the offset_to_bitmap() aligns to the + * start of the bitmap. This lets us know we are fully + * scanning the bitmap rather than only some portion of it. + */ + if (start == offset) + entry->trim_state = BTRFS_TRIM_STATE_TRIMMING; + bytes = minlen; ret2 = search_bitmap(ctl, entry, &start, &bytes, false); if (ret2 || start >= end) { + /* + * This keeps the invariant that all bytes are trimmed + * if BTRFS_TRIM_STATE_TRIMMED is set on a bitmap. + */ + if (ret2 && !minlen) + end_trimming_bitmap(entry); + else + entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; spin_unlock(&ctl->tree_lock); mutex_unlock(&ctl->cache_writeout_mutex); next_bitmap = true; @@ -3366,6 +3425,7 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, bytes = min(bytes, end - start); if (bytes < minlen) { + entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; spin_unlock(&ctl->tree_lock); mutex_unlock(&ctl->cache_writeout_mutex); goto next; @@ -3383,18 +3443,21 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, ret = do_trimming(block_group, total_trimmed, start, bytes, start, bytes, &trim_entry); - if (ret) + if (ret) { + reset_trimming_bitmap(ctl, offset); break; + } next: if (next_bitmap) { offset += BITS_PER_BITMAP * ctl->unit; + start = offset; } else { start += bytes; - if (start >= offset + BITS_PER_BITMAP * ctl->unit) - offset += BITS_PER_BITMAP * ctl->unit; } if (fatal_signal_pending(current)) { + if (start != offset) + reset_trimming_bitmap(ctl, offset); ret = -ERESTARTSYS; break; } @@ -3448,7 +3511,9 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group *block_group) int btrfs_trim_block_group(struct btrfs_block_group *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; int ret; + u64 rem = 0; *trimmed = 0; @@ -3465,6 +3530,10 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group, goto out; ret = trim_bitmaps(block_group, trimmed, start, end, minlen); + div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem); + /* If we ended in the middle of a bitmap, reset the trimming flag */ + if (rem) + reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end)); out: btrfs_put_block_group_trimming(block_group); return ret; @@ -3649,6 +3718,7 @@ int test_add_free_space_entry(struct btrfs_block_group *cache, struct btrfs_free_space_ctl *ctl = 
cache->free_space_ctl; struct btrfs_free_space *info = NULL, *bitmap_info; void *map = NULL; + enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_TRIMMED; u64 bytes_added; int ret; @@ -3690,7 +3760,8 @@ again: info = NULL; } - bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); + bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes, + trim_state); bytes -= bytes_added; offset += bytes_added; diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 66c073f854dc..5f25d94c9946 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -8,10 +8,16 @@ /* * This is the trim state of an extent or bitmap. + * + * BTRFS_TRIM_STATE_TRIMMING is special and used to maintain the state of a + * bitmap as we may need several trims to fully trim a single bitmap entry. + * This is reset should any free space other than trimmed space be added to the + * bitmap. */ enum btrfs_trim_state { BTRFS_TRIM_STATE_UNTRIMMED, BTRFS_TRIM_STATE_TRIMMED, + BTRFS_TRIM_STATE_TRIMMING, }; struct btrfs_free_space { @@ -29,6 +35,12 @@ static inline bool btrfs_free_space_trimmed(struct btrfs_free_space *info) return (info->trim_state == BTRFS_TRIM_STATE_TRIMMED); } +static inline bool btrfs_free_space_trimming_bitmap( + struct btrfs_free_space *info) +{ + return (info->trim_state == BTRFS_TRIM_STATE_TRIMMING); +} + struct btrfs_free_space_ctl { spinlock_t tree_lock; struct rb_root free_space_offset; -- cgit v1.2.3-59-g8ed1b From b0643e59cfa609c4b5f246f2b2c33b078f87e9d9 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:14 -0800 Subject: btrfs: add the beginning of async discard, discard workqueue When discard is enabled, everytime a pinned extent is released back to the block_group's free space cache, a discard is issued for the extent. This is an overeager approach when it comes to discarding and helping the SSD maintain enough free space to prevent severe garbage collection situations. This adds the beginning of async discard. Instead of issuing a discard prior to returning it to the free space, it is just marked as untrimmed. The block_group is then added to a LRU which then feeds into a workqueue to issue discards at a much slower rate. Full discarding of unused block groups is still done and will be addressed in a future patch of the series. For now, we don't persist the discard state of extents and bitmaps. Therefore, our failure recovery mode will be to consider extents untrimmed. This lets us handle failure and unmounting as one in the same. On a number of Facebook webservers, I collected data every minute accounting the time we spent in btrfs_finish_extent_commit() (col. 1) and in btrfs_commit_transaction() (col. 2). btrfs_finish_extent_commit() is where we discard extents synchronously before returning them to the free space cache. discard=sync: p99 total per minute p99 total per minute Drive | extent_commit() (ms) | commit_trans() (ms) --------------------------------------------------------------- Drive A | 434 | 1170 Drive B | 880 | 2330 Drive C | 2943 | 3920 Drive D | 4763 | 5701 discard=async: p99 total per minute p99 total per minute Drive | extent_commit() (ms) | commit_trans() (ms) -------------------------------------------------------------- Drive A | 134 | 956 Drive B | 64 | 1972 Drive C | 59 | 1032 Drive D | 62 | 1200 While it's not great that the stats are cumulative over 1m, all of these servers are running the same workload and and the delta between the two are substantial. 
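For reference, the scheduling idea described above (a block group becomes eligible for discard only after a grace period, and a single delayed worker services whichever group is eligible next) can be modelled in a few lines. This is an illustrative userspace sketch with simplified list handling, not the implementation in discard.c:

#include <stddef.h>
#include <stdint.h>

#define DISCARD_DELAY_NS (120ULL * 1000000000ULL)	/* 2 minute grace period, as in the patch */

struct block_group {
	uint64_t discard_eligible_time;	/* absolute time in ns */
	struct block_group *next;	/* stand-in for the kernel list */
};

/* Queueing stamps the group with "now + delay" so recently freed space
 * gets a chance to be reused before we bother discarding it. */
static void queue_for_discard(struct block_group *bg, uint64_t now)
{
	bg->discard_eligible_time = now + DISCARD_DELAY_NS;
}

/* The worker picks the group with the earliest eligibility.  If that
 * time is still in the future, the caller re-arms the delayed work for
 * the remaining interval instead of discarding anything. */
static struct block_group *next_to_discard(struct block_group *head,
					   uint64_t now, uint64_t *rearm_ns)
{
	struct block_group *best = NULL;
	struct block_group *bg;

	for (bg = head; bg; bg = bg->next)
		if (!best || bg->discard_eligible_time < best->discard_eligible_time)
			best = bg;

	*rearm_ns = (best && best->discard_eligible_time > now) ?
		    best->discard_eligible_time - now : 0;
	return best;
}
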
We are spending significantly less time in btrfs_finish_extent_commit() which is responsible for discarding. Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/Makefile | 2 +- fs/btrfs/block-group.c | 37 +++++- fs/btrfs/block-group.h | 9 ++ fs/btrfs/ctree.h | 21 ++++ fs/btrfs/discard.c | 273 ++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/discard.h | 23 ++++ fs/btrfs/disk-io.c | 13 ++- fs/btrfs/extent-tree.c | 4 + fs/btrfs/free-space-cache.c | 54 ++++++++- fs/btrfs/free-space-cache.h | 2 + fs/btrfs/super.c | 35 +++++- fs/btrfs/volumes.c | 8 ++ 12 files changed, 468 insertions(+), 13 deletions(-) create mode 100644 fs/btrfs/discard.c create mode 100644 fs/btrfs/discard.h diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 82200dbca5ac..9a0ff3384381 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -11,7 +11,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \ reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \ uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \ - block-rsv.o delalloc-space.o block-group.o + block-rsv.o delalloc-space.o block-group.o discard.o btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index be1938dc94fd..6ba15c45e779 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -14,6 +14,7 @@ #include "sysfs.h" #include "tree-log.h" #include "delalloc-space.h" +#include "discard.h" /* * Return target flags in extended format or 0 if restripe for this chunk_type @@ -131,6 +132,15 @@ void btrfs_put_block_group(struct btrfs_block_group *cache) WARN_ON(cache->pinned > 0); WARN_ON(cache->reserved > 0); + /* + * A block_group shouldn't be on the discard_list anymore. + * Remove the block_group from the discard_list to prevent us + * from causing a panic due to NULL pointer dereference. + */ + if (WARN_ON(!list_empty(&cache->discard_list))) + btrfs_discard_cancel_work(&cache->fs_info->discard_ctl, + cache); + /* * If not empty, someone is still holding mutex of * full_stripe_lock, which can only be released by caller. 
@@ -466,8 +476,8 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end } else if (extent_start > start && extent_start < end) { size = extent_start - start; total_added += size; - ret = btrfs_add_free_space(block_group, start, - size); + ret = btrfs_add_free_space_async_trimmed(block_group, + start, size); BUG_ON(ret); /* -ENOMEM or logic error */ start = extent_end + 1; } else { @@ -478,7 +488,8 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end if (start < end) { size = end - start; total_added += size; - ret = btrfs_add_free_space(block_group, start, size); + ret = btrfs_add_free_space_async_trimmed(block_group, start, + size); BUG_ON(ret); /* -ENOMEM or logic error */ } @@ -1258,6 +1269,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) } spin_unlock(&fs_info->unused_bgs_lock); + btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); + mutex_lock(&fs_info->delete_unused_bgs_mutex); /* Don't want to race with allocators so take the groups_sem */ @@ -1333,6 +1346,23 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) } mutex_unlock(&fs_info->unused_bg_unpin_mutex); + /* + * At this point, the block_group is read only and should fail + * new allocations. However, btrfs_finish_extent_commit() can + * cause this block_group to be placed back on the discard + * lists because now the block_group isn't fully discarded. + * Bail here and try again later after discarding everything. + */ + spin_lock(&fs_info->discard_ctl.lock); + if (!list_empty(&block_group->discard_list)) { + spin_unlock(&fs_info->discard_ctl.lock); + btrfs_dec_block_group_ro(block_group); + btrfs_discard_queue_work(&fs_info->discard_ctl, + block_group); + goto end_trans; + } + spin_unlock(&fs_info->discard_ctl.lock); + /* Reset pinned so btrfs_put_block_group doesn't complain */ spin_lock(&space_info->lock); spin_lock(&block_group->lock); @@ -1603,6 +1633,7 @@ static struct btrfs_block_group *btrfs_create_block_group_cache( INIT_LIST_HEAD(&cache->cluster_list); INIT_LIST_HEAD(&cache->bg_list); INIT_LIST_HEAD(&cache->ro_list); + INIT_LIST_HEAD(&cache->discard_list); INIT_LIST_HEAD(&cache->dirty_list); INIT_LIST_HEAD(&cache->io_list); btrfs_init_free_space_ctl(cache); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 9b409676c4b2..884defd61dcd 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -116,7 +116,11 @@ struct btrfs_block_group { /* For read-only block groups */ struct list_head ro_list; + /* For discard operations */ atomic_t trimming; + struct list_head discard_list; + int discard_index; + u64 discard_eligible_time; /* For dirty block groups */ struct list_head dirty_list; @@ -158,6 +162,11 @@ struct btrfs_block_group { struct btrfs_full_stripe_locks_tree full_stripe_locks_root; }; +static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group) +{ + return (block_group->start + block_group->length); +} + #ifdef CONFIG_BTRFS_DEBUG static inline int btrfs_should_fragment_free_space( struct btrfs_block_group *block_group) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2f6c21ea84af..f7b429277089 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -440,6 +440,21 @@ struct btrfs_full_stripe_locks_tree { struct mutex lock; }; +/* Discard control. */ +/* + * Async discard uses multiple lists to differentiate the discard filter + * parameters. 
+ */ +#define BTRFS_NR_DISCARD_LISTS 1 + +struct btrfs_discard_ctl { + struct workqueue_struct *discard_workers; + struct delayed_work work; + spinlock_t lock; + struct btrfs_block_group *block_group; + struct list_head discard_list[BTRFS_NR_DISCARD_LISTS]; +}; + /* delayed seq elem */ struct seq_list { struct list_head list; @@ -526,6 +541,9 @@ enum { * so we don't need to offload checksums to workqueues. */ BTRFS_FS_CSUM_IMPL_FAST, + + /* Indicate that the discard workqueue can service discards. */ + BTRFS_FS_DISCARD_RUNNING, }; struct btrfs_fs_info { @@ -816,6 +834,8 @@ struct btrfs_fs_info { struct btrfs_workqueue *scrub_wr_completion_workers; struct btrfs_workqueue *scrub_parity_workers; + struct btrfs_discard_ctl discard_ctl; + #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY u32 check_integrity_print_mask; #endif @@ -1189,6 +1209,7 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info) #define BTRFS_MOUNT_FREE_SPACE_TREE (1 << 26) #define BTRFS_MOUNT_NOLOGREPLAY (1 << 27) #define BTRFS_MOUNT_REF_VERIFY (1 << 28) +#define BTRFS_MOUNT_DISCARD_ASYNC (1 << 29) #define BTRFS_DEFAULT_COMMIT_INTERVAL (30) #define BTRFS_DEFAULT_MAX_INLINE (2048) diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c new file mode 100644 index 000000000000..5924e757471b --- /dev/null +++ b/fs/btrfs/discard.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include "ctree.h" +#include "block-group.h" +#include "discard.h" +#include "free-space-cache.h" + +/* This is an initial delay to give some chance for block reuse */ +#define BTRFS_DISCARD_DELAY (120ULL * NSEC_PER_SEC) + +static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group) +{ + return &discard_ctl->discard_list[block_group->discard_index]; +} + +static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group) +{ + spin_lock(&discard_ctl->lock); + + if (!btrfs_run_discard_work(discard_ctl)) { + spin_unlock(&discard_ctl->lock); + return; + } + + if (list_empty(&block_group->discard_list)) + block_group->discard_eligible_time = (ktime_get_ns() + + BTRFS_DISCARD_DELAY); + + list_move_tail(&block_group->discard_list, + get_discard_list(discard_ctl, block_group)); + + spin_unlock(&discard_ctl->lock); +} + +static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group) +{ + bool running = false; + + spin_lock(&discard_ctl->lock); + + if (block_group == discard_ctl->block_group) { + running = true; + discard_ctl->block_group = NULL; + } + + block_group->discard_eligible_time = 0; + list_del_init(&block_group->discard_list); + + spin_unlock(&discard_ctl->lock); + + return running; +} + +/** + * find_next_block_group - find block_group that's up next for discarding + * @discard_ctl: discard control + * @now: current time + * + * Iterate over the discard lists to find the next block_group up for + * discarding checking the discard_eligible_time of block_group. 
+ */ +static struct btrfs_block_group *find_next_block_group( + struct btrfs_discard_ctl *discard_ctl, + u64 now) +{ + struct btrfs_block_group *ret_block_group = NULL, *block_group; + int i; + + for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) { + struct list_head *discard_list = &discard_ctl->discard_list[i]; + + if (!list_empty(discard_list)) { + block_group = list_first_entry(discard_list, + struct btrfs_block_group, + discard_list); + + if (!ret_block_group) + ret_block_group = block_group; + + if (ret_block_group->discard_eligible_time < now) + break; + + if (ret_block_group->discard_eligible_time > + block_group->discard_eligible_time) + ret_block_group = block_group; + } + } + + return ret_block_group; +} + +/** + * peek_discard_list - wrap find_next_block_group() + * @discard_ctl: discard control + * + * This wraps find_next_block_group() and sets the block_group to be in use. + */ +static struct btrfs_block_group *peek_discard_list( + struct btrfs_discard_ctl *discard_ctl) +{ + struct btrfs_block_group *block_group; + const u64 now = ktime_get_ns(); + + spin_lock(&discard_ctl->lock); + + block_group = find_next_block_group(discard_ctl, now); + + if (block_group && now < block_group->discard_eligible_time) + block_group = NULL; + + discard_ctl->block_group = block_group; + + spin_unlock(&discard_ctl->lock); + + return block_group; +} + +/** + * btrfs_discard_cancel_work - remove a block_group from the discard lists + * @discard_ctl: discard control + * @block_group: block_group of interest + * + * This removes @block_group from the discard lists. If necessary, it waits on + * the current work and then reschedules the delayed work. + */ +void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group) +{ + if (remove_from_discard_list(discard_ctl, block_group)) { + cancel_delayed_work_sync(&discard_ctl->work); + btrfs_discard_schedule_work(discard_ctl, true); + } +} + +/** + * btrfs_discard_queue_work - handles queuing the block_groups + * @discard_ctl: discard control + * @block_group: block_group of interest + * + * This maintains the LRU order of the discard lists. + */ +void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group) +{ + if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) + return; + + add_to_discard_list(discard_ctl, block_group); + + if (!delayed_work_pending(&discard_ctl->work)) + btrfs_discard_schedule_work(discard_ctl, false); +} + +/** + * btrfs_discard_schedule_work - responsible for scheduling the discard work + * @discard_ctl: discard control + * @override: override the current timer + * + * Discards are issued by a delayed workqueue item. @override is used to + * update the current delay as the baseline delay interview is reevaluated + * on transaction commit. This is also maxed with any other rate limit. 
+ */ +void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, + bool override) +{ + struct btrfs_block_group *block_group; + const u64 now = ktime_get_ns(); + + spin_lock(&discard_ctl->lock); + + if (!btrfs_run_discard_work(discard_ctl)) + goto out; + + if (!override && delayed_work_pending(&discard_ctl->work)) + goto out; + + block_group = find_next_block_group(discard_ctl, now); + if (block_group) { + u64 delay = 0; + + if (now < block_group->discard_eligible_time) + delay = nsecs_to_jiffies( + block_group->discard_eligible_time - now); + + mod_delayed_work(discard_ctl->discard_workers, + &discard_ctl->work, delay); + } +out: + spin_unlock(&discard_ctl->lock); +} + +/** + * btrfs_discard_workfn - discard work function + * @work: work + * + * This finds the next block_group to start discarding and then discards it. + */ +static void btrfs_discard_workfn(struct work_struct *work) +{ + struct btrfs_discard_ctl *discard_ctl; + struct btrfs_block_group *block_group; + u64 trimmed = 0; + + discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work); + + block_group = peek_discard_list(discard_ctl); + if (!block_group || !btrfs_run_discard_work(discard_ctl)) + return; + + btrfs_trim_block_group(block_group, &trimmed, block_group->start, + btrfs_block_group_end(block_group), 0); + + remove_from_discard_list(discard_ctl, block_group); + btrfs_discard_schedule_work(discard_ctl, false); +} + +/** + * btrfs_run_discard_work - determines if async discard should be running + * @discard_ctl: discard control + * + * Checks if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set. + */ +bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl) +{ + struct btrfs_fs_info *fs_info = container_of(discard_ctl, + struct btrfs_fs_info, + discard_ctl); + + return (!(fs_info->sb->s_flags & SB_RDONLY) && + test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags)); +} + +void btrfs_discard_resume(struct btrfs_fs_info *fs_info) +{ + if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) { + btrfs_discard_cleanup(fs_info); + return; + } + + set_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags); +} + +void btrfs_discard_stop(struct btrfs_fs_info *fs_info) +{ + clear_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags); +} + +void btrfs_discard_init(struct btrfs_fs_info *fs_info) +{ + struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl; + int i; + + spin_lock_init(&discard_ctl->lock); + INIT_DELAYED_WORK(&discard_ctl->work, btrfs_discard_workfn); + + for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) + INIT_LIST_HEAD(&discard_ctl->discard_list[i]); +} + +void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info) +{ + btrfs_discard_stop(fs_info); + cancel_delayed_work_sync(&fs_info->discard_ctl.work); +} diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h new file mode 100644 index 000000000000..f3775e84d35a --- /dev/null +++ b/fs/btrfs/discard.h @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef BTRFS_DISCARD_H +#define BTRFS_DISCARD_H + +struct btrfs_fs_info; +struct btrfs_discard_ctl; +struct btrfs_block_group; + +void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group); +void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group); +void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, + bool override); +bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl); + +void btrfs_discard_resume(struct btrfs_fs_info *fs_info); +void 
btrfs_discard_stop(struct btrfs_fs_info *fs_info); +void btrfs_discard_init(struct btrfs_fs_info *fs_info); +void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info); + +#endif diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 881aba162e4e..5ce2801f8388 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -41,6 +41,7 @@ #include "tree-checker.h" #include "ref-verify.h" #include "block-group.h" +#include "discard.h" #define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\ BTRFS_HEADER_FLAG_RELOC |\ @@ -1953,6 +1954,8 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->readahead_workers); btrfs_destroy_workqueue(fs_info->flush_workers); btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); + if (fs_info->discard_ctl.discard_workers) + destroy_workqueue(fs_info->discard_ctl.discard_workers); /* * Now that all other work queues are destroyed, we can safely destroy * the queues used for metadata I/O, since tasks from those other work @@ -2148,6 +2151,8 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, max_active, 2); fs_info->qgroup_rescan_workers = btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); + fs_info->discard_ctl.discard_workers = + alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1); if (!(fs_info->workers && fs_info->delalloc_workers && fs_info->flush_workers && @@ -2158,7 +2163,8 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, fs_info->endio_freespace_worker && fs_info->rmw_workers && fs_info->caching_workers && fs_info->readahead_workers && fs_info->fixup_workers && fs_info->delayed_workers && - fs_info->qgroup_rescan_workers)) { + fs_info->qgroup_rescan_workers && + fs_info->discard_ctl.discard_workers)) { return -ENOMEM; } @@ -2792,6 +2798,7 @@ int __cold open_ctree(struct super_block *sb, btrfs_init_dev_replace_locks(fs_info); btrfs_init_qgroup(fs_info); + btrfs_discard_init(fs_info); btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); btrfs_init_free_cluster(&fs_info->data_alloc_cluster); @@ -3255,6 +3262,7 @@ int __cold open_ctree(struct super_block *sb, } btrfs_qgroup_rescan_resume(fs_info); + btrfs_discard_resume(fs_info); if (!fs_info->uuid_root) { btrfs_info(fs_info, "creating UUID tree"); @@ -3971,6 +3979,9 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) cancel_work_sync(&fs_info->async_reclaim_work); + /* Cancel or finish ongoing discard work */ + btrfs_discard_cleanup(fs_info); + if (!sb_rdonly(fs_info->sb)) { /* * The cleaner kthread is stopped, so do one final pass over diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1a8bf943c3e7..2c12366cfde5 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -32,6 +32,7 @@ #include "block-rsv.h" #include "delalloc-space.h" #include "block-group.h" +#include "discard.h" #undef SCRAMBLE_DELAYED_REFS @@ -2934,6 +2935,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) cond_resched(); } + if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) + btrfs_discard_schedule_work(&fs_info->discard_ctl, true); + /* * Transaction is finished. We don't need the lock anymore. 
We * do need to clean up the block groups in case of a transaction diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 063f4db44024..fdc5401f3877 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -21,6 +21,7 @@ #include "space-info.h" #include "delalloc-space.h" #include "block-group.h" +#include "discard.h" #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) #define MAX_CACHE_BYTES_PER_GIG SZ_32K @@ -755,9 +756,11 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, /* * Sync discard ensures that the free space cache is always * trimmed. So when reading this in, the state should reflect - * that. + * that. We also do this for async as a stop gap for lack of + * persistence. */ - if (btrfs_test_opt(fs_info, DISCARD_SYNC)) + if (btrfs_test_opt(fs_info, DISCARD_SYNC) || + btrfs_test_opt(fs_info, DISCARD_ASYNC)) e->trim_state = BTRFS_TRIM_STATE_TRIMMED; if (!e->bytes) { @@ -2382,6 +2385,7 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, u64 offset, u64 bytes, enum btrfs_trim_state trim_state) { + struct btrfs_block_group *block_group = ctl->private; struct btrfs_free_space *info; int ret = 0; @@ -2431,6 +2435,9 @@ out: ASSERT(ret != -EEXIST); } + if (trim_state != BTRFS_TRIM_STATE_TRIMMED) + btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); + return ret; } @@ -2447,6 +2454,25 @@ int btrfs_add_free_space(struct btrfs_block_group *block_group, bytenr, size, trim_state); } +/* + * This is a subtle distinction because when adding free space back in general, + * we want it to be added as untrimmed for async. But in the case where we add + * it on loading of a block group, we want to consider it trimmed. + */ +int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group, + u64 bytenr, u64 size) +{ + enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED; + + if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) || + btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) + trim_state = BTRFS_TRIM_STATE_TRIMMED; + + return __btrfs_add_free_space(block_group->fs_info, + block_group->free_space_ctl, + bytenr, size, trim_state); +} + int btrfs_remove_free_space(struct btrfs_block_group *block_group, u64 offset, u64 bytes) { @@ -3208,6 +3234,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) static int do_trimming(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 bytes, u64 reserved_start, u64 reserved_bytes, + enum btrfs_trim_state reserved_trim_state, struct btrfs_trim_range *trim_entry) { struct btrfs_space_info *space_info = block_group->space_info; @@ -3215,6 +3242,9 @@ static int do_trimming(struct btrfs_block_group *block_group, struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; int ret; int update = 0; + const u64 end = start + bytes; + const u64 reserved_end = reserved_start + reserved_bytes; + enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED; u64 trimmed = 0; spin_lock(&space_info->lock); @@ -3228,11 +3258,20 @@ static int do_trimming(struct btrfs_block_group *block_group, spin_unlock(&space_info->lock); ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed); - if (!ret) + if (!ret) { *total_trimmed += trimmed; + trim_state = BTRFS_TRIM_STATE_TRIMMED; + } mutex_lock(&ctl->cache_writeout_mutex); - btrfs_add_free_space(block_group, reserved_start, reserved_bytes); + if (reserved_start < start) + __btrfs_add_free_space(fs_info, ctl, reserved_start, + start - reserved_start, + reserved_trim_state); + if 
(start + bytes < reserved_start + reserved_bytes) + __btrfs_add_free_space(fs_info, ctl, end, reserved_end - end, + reserved_trim_state); + __btrfs_add_free_space(fs_info, ctl, start, bytes, trim_state); list_del(&trim_entry->list); mutex_unlock(&ctl->cache_writeout_mutex); @@ -3259,6 +3298,7 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group, int ret = 0; u64 extent_start; u64 extent_bytes; + enum btrfs_trim_state extent_trim_state; u64 bytes; while (start < end) { @@ -3300,6 +3340,7 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group, extent_start = entry->offset; extent_bytes = entry->bytes; + extent_trim_state = entry->trim_state; start = max(start, extent_start); bytes = min(extent_start + extent_bytes, end) - start; if (bytes < minlen) { @@ -3318,7 +3359,8 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group, mutex_unlock(&ctl->cache_writeout_mutex); ret = do_trimming(block_group, total_trimmed, start, bytes, - extent_start, extent_bytes, &trim_entry); + extent_start, extent_bytes, extent_trim_state, + &trim_entry); if (ret) break; next: @@ -3442,7 +3484,7 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, mutex_unlock(&ctl->cache_writeout_mutex); ret = do_trimming(block_group, total_trimmed, start, bytes, - start, bytes, &trim_entry); + start, bytes, 0, &trim_entry); if (ret) { reset_trimming_bitmap(ctl, offset); break; diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 5f25d94c9946..6a85a5d16343 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -113,6 +113,8 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, enum btrfs_trim_state trim_state); int btrfs_add_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size); +int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group, + u64 bytenr, u64 size); int btrfs_remove_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size); void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 08ac6a7a67f0..a906315efd19 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -46,6 +46,7 @@ #include "sysfs.h" #include "tests/btrfs-tests.h" #include "block-group.h" +#include "discard.h" #include "qgroup.h" #define CREATE_TRACE_POINTS @@ -146,6 +147,8 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function if (sb_rdonly(sb)) return; + btrfs_discard_stop(fs_info); + /* btrfs handle error by forcing the filesystem readonly */ sb->s_flags |= SB_RDONLY; btrfs_info(fs_info, "forced readonly"); @@ -313,6 +316,7 @@ enum { Opt_datasum, Opt_nodatasum, Opt_defrag, Opt_nodefrag, Opt_discard, Opt_nodiscard, + Opt_discard_mode, Opt_nologreplay, Opt_norecovery, Opt_ratio, @@ -375,6 +379,7 @@ static const match_table_t tokens = { {Opt_defrag, "autodefrag"}, {Opt_nodefrag, "noautodefrag"}, {Opt_discard, "discard"}, + {Opt_discard_mode, "discard=%s"}, {Opt_nodiscard, "nodiscard"}, {Opt_nologreplay, "nologreplay"}, {Opt_norecovery, "norecovery"}, @@ -695,12 +700,26 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, info->metadata_ratio); break; case Opt_discard: - btrfs_set_and_info(info, DISCARD_SYNC, - "turning on sync discard"); + case Opt_discard_mode: + if (token == Opt_discard || + strcmp(args[0].from, "sync") == 0) { + btrfs_clear_opt(info->mount_opt, DISCARD_ASYNC); + btrfs_set_and_info(info, DISCARD_SYNC, + "turning on sync discard"); + } else if (strcmp(args[0].from, "async") 
== 0) { + btrfs_clear_opt(info->mount_opt, DISCARD_SYNC); + btrfs_set_and_info(info, DISCARD_ASYNC, + "turning on async discard"); + } else { + ret = -EINVAL; + goto out; + } break; case Opt_nodiscard: btrfs_clear_and_info(info, DISCARD_SYNC, "turning off discard"); + btrfs_clear_and_info(info, DISCARD_ASYNC, + "turning off async discard"); break; case Opt_space_cache: case Opt_space_cache_version: @@ -1324,6 +1343,8 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) seq_puts(seq, ",flushoncommit"); if (btrfs_test_opt(info, DISCARD_SYNC)) seq_puts(seq, ",discard"); + if (btrfs_test_opt(info, DISCARD_ASYNC)) + seq_puts(seq, ",discard=async"); if (!(info->sb->s_flags & SB_POSIXACL)) seq_puts(seq, ",noacl"); if (btrfs_test_opt(info, SPACE_CACHE)) @@ -1713,6 +1734,14 @@ static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info, btrfs_cleanup_defrag_inodes(fs_info); } + /* If we toggled discard async */ + if (!btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) && + btrfs_test_opt(fs_info, DISCARD_ASYNC)) + btrfs_discard_resume(fs_info); + else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) && + !btrfs_test_opt(fs_info, DISCARD_ASYNC)) + btrfs_discard_cleanup(fs_info); + clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); } @@ -1760,6 +1789,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) */ cancel_work_sync(&fs_info->async_reclaim_work); + btrfs_discard_cleanup(fs_info); + /* wait for the uuid_scan task to finish */ down(&fs_info->uuid_tree_rescan_sem); /* avoid complains from lockdep et al. */ diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 86990b7a60ed..d184a994c392 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -30,6 +30,7 @@ #include "tree-checker.h" #include "space-info.h" #include "block-group.h" +#include "discard.h" const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { [BTRFS_RAID_RAID10] = { @@ -2870,6 +2871,7 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) { struct btrfs_root *root = fs_info->chunk_root; struct btrfs_trans_handle *trans; + struct btrfs_block_group *block_group; int ret; /* @@ -2893,6 +2895,12 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) if (ret) return ret; + block_group = btrfs_lookup_block_group(fs_info, chunk_offset); + if (!block_group) + return -ENOENT; + btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); + btrfs_put_block_group(block_group); + trans = btrfs_start_trans_remove_block_group(root->fs_info, chunk_offset); if (IS_ERR(trans)) { -- cgit v1.2.3-59-g8ed1b From 6e80d4f8c422d3b2b0c37324d3243f5ed9b558c8 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:15 -0800 Subject: btrfs: handle empty block_group removal for async discard block_group removal is a little tricky. It can race with the extent allocator, the cleaner thread, and balancing. The current path is for a block_group to be added to the unused_bgs list. Then, when the cleaner thread comes around, it starts a transaction and then proceeds with removing the block_group. Extents that are pinned are subsequently removed from the pinned trees and then eventually a discard is issued for the entire block_group. Async discard introduces another player into the game, the discard workqueue. While it has none of the racing issues, the new problem is ensuring we don't leave free space untrimmed prior to forgetting the block_group. 
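To make that concrete, the gate this patch relies on can be sketched as a predicate over a block group's remaining free space (a simplified model; the real btrfs_is_free_space_trimmed() added here works over the block group's free space entries):

#include <stdbool.h>
#include <stddef.h>

enum trim_state { UNTRIMMED, TRIMMED };

struct free_space_entry {
	enum trim_state trim_state;
};

/* A block group can only skip the final whole-group discard if every
 * remaining free space entry has already been trimmed; a single
 * untrimmed entry keeps it on the async discard queue. */
static bool free_space_fully_trimmed(const struct free_space_entry *entries,
				     size_t nr)
{
	size_t i;

	for (i = 0; i < nr; i++)
		if (entries[i].trim_state != TRIMMED)
			return false;
	return true;
}

The remaining question is how a fully freed block group gets that final trim without racing with its removal.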
This is handled by placing fully free block_groups on a separate discard queue. This is necessary to maintain discarding order as in the future we will slowly trim even fully free block_groups. The ordering helps us make progress on the same block_group rather than say the last fully freed block_group or needing to search through the fully freed block groups at the beginning of a list and insert after. The new order of events is a fully freed block group gets placed on the unused discard queue first. Once it's processed, it will be placed on the unusued_bgs list and then the original sequence of events will happen, just without the final whole block_group discard. The mount flags can change when processing unused_bgs, so when flipping from DISCARD to DISCARD_ASYNC, the unused_bgs must be punted to the discard_list to be trimmed. If we flip off DISCARD_ASYNC, we punt free block groups on the discard_list to the unused_bg queue which will do the final discard for us. Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 47 ++++++++++++++++-- fs/btrfs/ctree.h | 9 +++- fs/btrfs/discard.c | 115 ++++++++++++++++++++++++++++++++++++++++++-- fs/btrfs/discard.h | 3 ++ fs/btrfs/free-space-cache.c | 33 +++++++++++++ fs/btrfs/free-space-cache.h | 1 + fs/btrfs/scrub.c | 7 ++- 7 files changed, 206 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 6ba15c45e779..ba71a84fd1ee 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1246,6 +1246,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) struct btrfs_block_group *block_group; struct btrfs_space_info *space_info; struct btrfs_trans_handle *trans; + const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); int ret = 0; if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) @@ -1275,6 +1276,22 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) /* Don't want to race with allocators so take the groups_sem */ down_write(&space_info->groups_sem); + + /* + * Async discard moves the final block group discard to be prior + * to the unused_bgs code path. Therefore, if it's not fully + * trimmed, punt it back to the async discard lists. + */ + if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && + !btrfs_is_free_space_trimmed(block_group)) { + trace_btrfs_skip_unused_block_group(block_group); + up_write(&space_info->groups_sem); + /* Requeue if we failed because of async discard */ + btrfs_discard_queue_work(&fs_info->discard_ctl, + block_group); + goto next; + } + spin_lock(&block_group->lock); if (block_group->reserved || block_group->pinned || block_group->used || block_group->ro || @@ -1378,6 +1395,16 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) spin_unlock(&block_group->lock); spin_unlock(&space_info->lock); + /* + * The normal path here is an unused block group is passed here, + * then trimming is handled in the transaction commit path. + * Async discard interposes before this to do the trimming + * before coming down the unused block group path as trimming + * will no longer be done later in the transaction commit path. 
+ */ + if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) + goto flip_async; + /* DISCARD can flip during remount */ trimming = btrfs_test_opt(fs_info, DISCARD_SYNC); @@ -1422,6 +1449,13 @@ next: spin_lock(&fs_info->unused_bgs_lock); } spin_unlock(&fs_info->unused_bgs_lock); + return; + +flip_async: + btrfs_end_transaction(trans); + mutex_unlock(&fs_info->delete_unused_bgs_mutex); + btrfs_put_block_group(block_group); + btrfs_discard_punt_unused_bgs_list(fs_info); } void btrfs_mark_bg_unused(struct btrfs_block_group *bg) @@ -1626,6 +1660,8 @@ static struct btrfs_block_group *btrfs_create_block_group_cache( cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); set_free_space_tree_thresholds(cache); + cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; + atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); init_rwsem(&cache->data_rwsem); @@ -1792,7 +1828,10 @@ static int read_one_block_group(struct btrfs_fs_info *info, inc_block_group_ro(cache, 1); } else if (cache->used == 0) { ASSERT(list_empty(&cache->bg_list)); - btrfs_mark_bg_unused(cache); + if (btrfs_test_opt(info, DISCARD_ASYNC)) + btrfs_discard_queue_work(&info->discard_ctl, cache); + else + btrfs_mark_bg_unused(cache); } return 0; error: @@ -2755,8 +2794,10 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, * dirty list to avoid races between cleaner kthread and space * cache writeout. */ - if (!alloc && old_val == 0) - btrfs_mark_bg_unused(cache); + if (!alloc && old_val == 0) { + if (!btrfs_test_opt(info, DISCARD_ASYNC)) + btrfs_mark_bg_unused(cache); + } btrfs_put_block_group(cache); total -= num_bytes; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f7b429277089..d15a4aa721aa 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -443,9 +443,14 @@ struct btrfs_full_stripe_locks_tree { /* Discard control. */ /* * Async discard uses multiple lists to differentiate the discard filter - * parameters. + * parameters. Index 0 is for completely free block groups where we need to + * ensure the entire block group is trimmed without being lossy. Indices + * afterwards represent monotonically decreasing discard filter sizes to + * prioritize what should be discarded next. 
*/ -#define BTRFS_NR_DISCARD_LISTS 1 +#define BTRFS_NR_DISCARD_LISTS 2 +#define BTRFS_DISCARD_INDEX_UNUSED 0 +#define BTRFS_DISCARD_INDEX_START 1 struct btrfs_discard_ctl { struct workqueue_struct *discard_workers; diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 5924e757471b..0f1c6d01aab0 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -13,6 +13,7 @@ /* This is an initial delay to give some chance for block reuse */ #define BTRFS_DISCARD_DELAY (120ULL * NSEC_PER_SEC) +#define BTRFS_DISCARD_UNUSED_DELAY (10ULL * NSEC_PER_SEC) static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl, struct btrfs_block_group *block_group) @@ -30,9 +31,13 @@ static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, return; } - if (list_empty(&block_group->discard_list)) + if (list_empty(&block_group->discard_list) || + block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) { + if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) + block_group->discard_index = BTRFS_DISCARD_INDEX_START; block_group->discard_eligible_time = (ktime_get_ns() + BTRFS_DISCARD_DELAY); + } list_move_tail(&block_group->discard_list, get_discard_list(discard_ctl, block_group)); @@ -40,6 +45,27 @@ static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, spin_unlock(&discard_ctl->lock); } +static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group) +{ + spin_lock(&discard_ctl->lock); + + if (!btrfs_run_discard_work(discard_ctl)) { + spin_unlock(&discard_ctl->lock); + return; + } + + list_del_init(&block_group->discard_list); + + block_group->discard_index = BTRFS_DISCARD_INDEX_UNUSED; + block_group->discard_eligible_time = (ktime_get_ns() + + BTRFS_DISCARD_UNUSED_DELAY); + list_add_tail(&block_group->discard_list, + &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]); + + spin_unlock(&discard_ctl->lock); +} + static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl, struct btrfs_block_group *block_group) { @@ -154,7 +180,10 @@ void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl, if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) return; - add_to_discard_list(discard_ctl, block_group); + if (block_group->used == 0) + add_to_discard_unused_list(discard_ctl, block_group); + else + add_to_discard_list(discard_ctl, block_group); if (!delayed_work_pending(&discard_ctl->work)) btrfs_discard_schedule_work(discard_ctl, false); @@ -198,6 +227,29 @@ out: spin_unlock(&discard_ctl->lock); } +/** + * btrfs_finish_discard_pass - determine next step of a block_group + * @discard_ctl: discard control + * @block_group: block_group of interest + * + * This determines the next step for a block group after it's finished going + * through a pass on a discard list. If it is unused and fully trimmed, we can + * mark it unused and send it to the unused_bgs path. Otherwise, pass it onto + * the appropriate filter list or let it fall off. 
+ */ +static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group) +{ + remove_from_discard_list(discard_ctl, block_group); + + if (block_group->used == 0) { + if (btrfs_is_free_space_trimmed(block_group)) + btrfs_mark_bg_unused(block_group); + else + add_to_discard_unused_list(discard_ctl, block_group); + } +} + /** * btrfs_discard_workfn - discard work function * @work: work @@ -219,7 +271,7 @@ static void btrfs_discard_workfn(struct work_struct *work) btrfs_trim_block_group(block_group, &trimmed, block_group->start, btrfs_block_group_end(block_group), 0); - remove_from_discard_list(discard_ctl, block_group); + btrfs_finish_discard_pass(discard_ctl, block_group); btrfs_discard_schedule_work(discard_ctl, false); } @@ -239,6 +291,60 @@ bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl) test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags)); } +/** + * btrfs_discard_punt_unused_bgs_list - punt unused_bgs list to discard lists + * @fs_info: fs_info of interest + * + * The unused_bgs list needs to be punted to the discard lists because the + * order of operations is changed. In the normal sychronous discard path, the + * block groups are trimmed via a single large trim in transaction commit. This + * is ultimately what we are trying to avoid with asynchronous discard. Thus, + * it must be done before going down the unused_bgs path. + */ +void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info) +{ + struct btrfs_block_group *block_group, *next; + + spin_lock(&fs_info->unused_bgs_lock); + /* We enabled async discard, so punt all to the queue */ + list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs, + bg_list) { + list_del_init(&block_group->bg_list); + btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); + } + spin_unlock(&fs_info->unused_bgs_lock); +} + +/** + * btrfs_discard_purge_list - purge discard lists + * @discard_ctl: discard control + * + * If we are disabling async discard, we may have intercepted block groups that + * are completely free and ready for the unused_bgs path. As discarding will + * now happen in transaction commit or not at all, we can safely mark the + * corresponding block groups as unused and they will be sent on their merry + * way to the unused_bgs list. 
+ */ +static void btrfs_discard_purge_list(struct btrfs_discard_ctl *discard_ctl) +{ + struct btrfs_block_group *block_group, *next; + int i; + + spin_lock(&discard_ctl->lock); + for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) { + list_for_each_entry_safe(block_group, next, + &discard_ctl->discard_list[i], + discard_list) { + list_del_init(&block_group->discard_list); + spin_unlock(&discard_ctl->lock); + if (block_group->used == 0) + btrfs_mark_bg_unused(block_group); + spin_lock(&discard_ctl->lock); + } + } + spin_unlock(&discard_ctl->lock); +} + void btrfs_discard_resume(struct btrfs_fs_info *fs_info) { if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) { @@ -246,6 +352,8 @@ void btrfs_discard_resume(struct btrfs_fs_info *fs_info) return; } + btrfs_discard_punt_unused_bgs_list(fs_info); + set_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags); } @@ -270,4 +378,5 @@ void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info) { btrfs_discard_stop(fs_info); cancel_delayed_work_sync(&fs_info->discard_ctl.work); + btrfs_discard_purge_list(&fs_info->discard_ctl); } diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h index f3775e84d35a..3c5a04f8714f 100644 --- a/fs/btrfs/discard.h +++ b/fs/btrfs/discard.h @@ -7,6 +7,7 @@ struct btrfs_fs_info; struct btrfs_discard_ctl; struct btrfs_block_group; +/* Work operations */ void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl, struct btrfs_block_group *block_group); void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl, @@ -15,6 +16,8 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, bool override); bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl); +/* Setup/cleanup operations */ +void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info); void btrfs_discard_resume(struct btrfs_fs_info *fs_info); void btrfs_discard_stop(struct btrfs_fs_info *fs_info); void btrfs_discard_init(struct btrfs_fs_info *fs_info); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index fdc5401f3877..3c2796bb6498 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2707,6 +2707,37 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group) } +/** + * btrfs_is_free_space_trimmed - see if everything is trimmed + * @block_group: block_group of interest + * + * Walk @block_group's free space rb_tree to determine if everything is trimmed. 
+ */ +bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct btrfs_free_space *info; + struct rb_node *node; + bool ret = true; + + spin_lock(&ctl->tree_lock); + node = rb_first(&ctl->free_space_offset); + + while (node) { + info = rb_entry(node, struct btrfs_free_space, offset_index); + + if (!btrfs_free_space_trimmed(info)) { + ret = false; + break; + } + + node = rb_next(node); + } + + spin_unlock(&ctl->tree_lock); + return ret; +} + u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, u64 offset, u64 bytes, u64 empty_size, u64 *max_extent_size) @@ -2793,6 +2824,8 @@ int btrfs_return_cluster_to_free_space( ret = __btrfs_return_cluster_to_free_space(block_group, cluster); spin_unlock(&ctl->tree_lock); + btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group); + /* finally drop our ref */ btrfs_put_block_group(block_group); return ret; diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 6a85a5d16343..f799eb491410 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -119,6 +119,7 @@ int btrfs_remove_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size); void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group); +bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group); u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, u64 offset, u64 bytes, u64 empty_size, u64 *max_extent_size); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 21de630b0730..22cf69e6e5bc 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -8,6 +8,7 @@ #include #include #include "ctree.h" +#include "discard.h" #include "volumes.h" #include "disk-io.h" #include "ordered-data.h" @@ -3659,7 +3660,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, if (!cache->removed && !cache->ro && cache->reserved == 0 && cache->used == 0) { spin_unlock(&cache->lock); - btrfs_mark_bg_unused(cache); + if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) + btrfs_discard_queue_work(&fs_info->discard_ctl, + cache); + else + btrfs_mark_bg_unused(cache); } else { spin_unlock(&cache->lock); } -- cgit v1.2.3-59-g8ed1b From 2bee7eb8bb8185679ea282b8ccff6bfabcf52a63 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:16 -0800 Subject: btrfs: discard one region at a time in async discard The prior two patches added discarding via a background workqueue. That simply piggybacked on the fstrim code to trim the whole block group at once. Inevitably this performs worse and will aggressively overtrim, but it was nice to plumb the other infrastructure to keep the patches easier to review. This adds the real goal of this series, which is discarding slowly (i.e. a slow, long-running fstrim). The discarding is split into two phases, extents and then bitmaps. The reason for this is twofold. First, the bitmap regions overlap the extent regions. Second, discarding the extents first will let the newly trimmed bitmaps have the highest chance of coalescing when being readded to the free space cache.
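To make the two-pass flow concrete, the following is a minimal user space sketch, not kernel code: a cursor walks the block group once for extents and once for bitmaps, trimming a single region per work item invocation. All names in it (bg_model, trim_one_region, discard_workfn_model, the 4K region size) are invented for illustration only; the real logic lives in btrfs_discard_workfn() in the diff below.

#include <stdint.h>
#include <stdio.h>

enum discard_pass { PASS_EXTENTS, PASS_BITMAPS, PASS_DONE };

struct bg_model {
	uint64_t start;
	uint64_t end;
	uint64_t cursor;		/* plays the role of discard_cursor */
	enum discard_pass pass;		/* plays the role of discard_state */
};

/* Pretend to discard one region at the cursor and return the new cursor. */
static uint64_t trim_one_region(const struct bg_model *bg, const char *name)
{
	uint64_t len = 4096;		/* hypothetical region size */

	printf("%s pass: trim [%llu, %llu)\n", name,
	       (unsigned long long)bg->cursor,
	       (unsigned long long)(bg->cursor + len));
	return bg->cursor + len;
}

/* One work item invocation: trim a single region, then decide what's next. */
static void discard_workfn_model(struct bg_model *bg)
{
	if (bg->pass == PASS_DONE)
		return;

	bg->cursor = trim_one_region(bg, bg->pass == PASS_BITMAPS ?
				     "bitmap" : "extent");

	if (bg->cursor >= bg->end) {
		if (bg->pass == PASS_EXTENTS) {
			/* Extents finished: restart the cursor for bitmaps. */
			bg->cursor = bg->start;
			bg->pass = PASS_BITMAPS;
		} else {
			bg->pass = PASS_DONE;
		}
	}
}

int main(void)
{
	struct bg_model bg = { .start = 0, .end = 16384, .cursor = 0,
			       .pass = PASS_EXTENTS };

	while (bg.pass != PASS_DONE)
		discard_workfn_model(&bg);
	return 0;
}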
Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.h | 15 +++++ fs/btrfs/discard.c | 85 ++++++++++++++++++++++------ fs/btrfs/free-space-cache.c | 131 ++++++++++++++++++++++++++++++++++---------- fs/btrfs/free-space-cache.h | 6 ++ 4 files changed, 191 insertions(+), 46 deletions(-) diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 884defd61dcd..a8d2edcd8760 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -12,6 +12,19 @@ enum btrfs_disk_cache_state { BTRFS_DC_SETUP, }; +/* + * This describes the state of the block_group for async discard. This is due + * to the two pass nature of it where extent discarding is prioritized over + * bitmap discarding. BTRFS_DISCARD_RESET_CURSOR is set when we are resetting + * between lists to prevent contention for discard state variables + * (eg. discard_cursor). + */ +enum btrfs_discard_state { + BTRFS_DISCARD_EXTENTS, + BTRFS_DISCARD_BITMAPS, + BTRFS_DISCARD_RESET_CURSOR, +}; + /* * Control flags for do_chunk_alloc's force field CHUNK_ALLOC_NO_FORCE means to * only allocate a chunk if we really need one. @@ -121,6 +134,8 @@ struct btrfs_block_group { struct list_head discard_list; int discard_index; u64 discard_eligible_time; + u64 discard_cursor; + enum btrfs_discard_state discard_state; /* For dirty block groups */ struct list_head dirty_list; diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 0f1c6d01aab0..cb9a23217ec6 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -21,15 +21,11 @@ static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl, return &discard_ctl->discard_list[block_group->discard_index]; } -static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, - struct btrfs_block_group *block_group) +static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group) { - spin_lock(&discard_ctl->lock); - - if (!btrfs_run_discard_work(discard_ctl)) { - spin_unlock(&discard_ctl->lock); + if (!btrfs_run_discard_work(discard_ctl)) return; - } if (list_empty(&block_group->discard_list) || block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) { @@ -37,11 +33,18 @@ static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, block_group->discard_index = BTRFS_DISCARD_INDEX_START; block_group->discard_eligible_time = (ktime_get_ns() + BTRFS_DISCARD_DELAY); + block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR; } list_move_tail(&block_group->discard_list, get_discard_list(discard_ctl, block_group)); +} +static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group) +{ + spin_lock(&discard_ctl->lock); + __add_to_discard_list(discard_ctl, block_group); spin_unlock(&discard_ctl->lock); } @@ -60,6 +63,7 @@ static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl, block_group->discard_index = BTRFS_DISCARD_INDEX_UNUSED; block_group->discard_eligible_time = (ktime_get_ns() + BTRFS_DISCARD_UNUSED_DELAY); + block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR; list_add_tail(&block_group->discard_list, &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]); @@ -127,23 +131,40 @@ static struct btrfs_block_group *find_next_block_group( /** * peek_discard_list - wrap find_next_block_group() * @discard_ctl: discard control + * @discard_state: the discard_state of the block_group after state management * * This wraps find_next_block_group() and sets the block_group to be 
in use. + * discard_state's control flow is managed here. Variables related to + * discard_state are reset here as needed (eg. discard_cursor). @discard_state + * is remembered as it may change while we're discarding, but we want the + * discard to execute in the context determined here. */ static struct btrfs_block_group *peek_discard_list( - struct btrfs_discard_ctl *discard_ctl) + struct btrfs_discard_ctl *discard_ctl, + enum btrfs_discard_state *discard_state) { struct btrfs_block_group *block_group; const u64 now = ktime_get_ns(); spin_lock(&discard_ctl->lock); - +again: block_group = find_next_block_group(discard_ctl, now); - if (block_group && now < block_group->discard_eligible_time) + if (block_group && now > block_group->discard_eligible_time) { + if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED && + block_group->used != 0) { + __add_to_discard_list(discard_ctl, block_group); + goto again; + } + if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) { + block_group->discard_cursor = block_group->start; + block_group->discard_state = BTRFS_DISCARD_EXTENTS; + } + discard_ctl->block_group = block_group; + *discard_state = block_group->discard_state; + } else { block_group = NULL; - - discard_ctl->block_group = block_group; + } spin_unlock(&discard_ctl->lock); @@ -254,24 +275,54 @@ static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl, * btrfs_discard_workfn - discard work function * @work: work * - * This finds the next block_group to start discarding and then discards it. + * This finds the next block_group to start discarding and then discards a + * single region. It does this in a two-pass fashion: first extents and second + * bitmaps. Completely discarded block groups are sent to the unused_bgs path. 
*/ static void btrfs_discard_workfn(struct work_struct *work) { struct btrfs_discard_ctl *discard_ctl; struct btrfs_block_group *block_group; + enum btrfs_discard_state discard_state; u64 trimmed = 0; discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work); - block_group = peek_discard_list(discard_ctl); + block_group = peek_discard_list(discard_ctl, &discard_state); if (!block_group || !btrfs_run_discard_work(discard_ctl)) return; - btrfs_trim_block_group(block_group, &trimmed, block_group->start, - btrfs_block_group_end(block_group), 0); + /* Perform discarding */ + if (discard_state == BTRFS_DISCARD_BITMAPS) + btrfs_trim_block_group_bitmaps(block_group, &trimmed, + block_group->discard_cursor, + btrfs_block_group_end(block_group), + 0, true); + else + btrfs_trim_block_group_extents(block_group, &trimmed, + block_group->discard_cursor, + btrfs_block_group_end(block_group), + 0, true); + + /* Determine next steps for a block_group */ + if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) { + if (discard_state == BTRFS_DISCARD_BITMAPS) { + btrfs_finish_discard_pass(discard_ctl, block_group); + } else { + block_group->discard_cursor = block_group->start; + spin_lock(&discard_ctl->lock); + if (block_group->discard_state != + BTRFS_DISCARD_RESET_CURSOR) + block_group->discard_state = + BTRFS_DISCARD_BITMAPS; + spin_unlock(&discard_ctl->lock); + } + } + + spin_lock(&discard_ctl->lock); + discard_ctl->block_group = NULL; + spin_unlock(&discard_ctl->lock); - btrfs_finish_discard_pass(discard_ctl, block_group); btrfs_discard_schedule_work(discard_ctl, false); } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 3c2796bb6498..2e8fbd67ec9b 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -3322,8 +3322,12 @@ static int do_trimming(struct btrfs_block_group *block_group, return ret; } +/* + * If @async is set, then we will trim 1 region and return. 
+ */ static int trim_no_bitmap(struct btrfs_block_group *block_group, - u64 *total_trimmed, u64 start, u64 end, u64 minlen) + u64 *total_trimmed, u64 start, u64 end, u64 minlen, + bool async) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; @@ -3340,36 +3344,25 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group, mutex_lock(&ctl->cache_writeout_mutex); spin_lock(&ctl->tree_lock); - if (ctl->free_space < minlen) { - spin_unlock(&ctl->tree_lock); - mutex_unlock(&ctl->cache_writeout_mutex); - break; - } + if (ctl->free_space < minlen) + goto out_unlock; entry = tree_search_offset(ctl, start, 0, 1); - if (!entry) { - spin_unlock(&ctl->tree_lock); - mutex_unlock(&ctl->cache_writeout_mutex); - break; - } + if (!entry) + goto out_unlock; - /* skip bitmaps */ - while (entry->bitmap) { + /* Skip bitmaps and if async, already trimmed entries */ + while (entry->bitmap || + (async && btrfs_free_space_trimmed(entry))) { node = rb_next(&entry->offset_index); - if (!node) { - spin_unlock(&ctl->tree_lock); - mutex_unlock(&ctl->cache_writeout_mutex); - goto out; - } + if (!node) + goto out_unlock; entry = rb_entry(node, struct btrfs_free_space, offset_index); } - if (entry->offset >= end) { - spin_unlock(&ctl->tree_lock); - mutex_unlock(&ctl->cache_writeout_mutex); - break; - } + if (entry->offset >= end) + goto out_unlock; extent_start = entry->offset; extent_bytes = entry->bytes; @@ -3394,10 +3387,15 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group, ret = do_trimming(block_group, total_trimmed, start, bytes, extent_start, extent_bytes, extent_trim_state, &trim_entry); - if (ret) + if (ret) { + block_group->discard_cursor = start + bytes; break; + } next: start += bytes; + block_group->discard_cursor = start; + if (async && *total_trimmed) + break; if (fatal_signal_pending(current)) { ret = -ERESTARTSYS; @@ -3406,7 +3404,14 @@ next: cond_resched(); } -out: + + return ret; + +out_unlock: + block_group->discard_cursor = btrfs_block_group_end(block_group); + spin_unlock(&ctl->tree_lock); + mutex_unlock(&ctl->cache_writeout_mutex); + return ret; } @@ -3441,8 +3446,12 @@ static void end_trimming_bitmap(struct btrfs_free_space *entry) entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; } +/* + * If @async is set, then we will trim 1 region and return. + */ static int trim_bitmaps(struct btrfs_block_group *block_group, - u64 *total_trimmed, u64 start, u64 end, u64 minlen) + u64 *total_trimmed, u64 start, u64 end, u64 minlen, + bool async) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; @@ -3459,13 +3468,16 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, spin_lock(&ctl->tree_lock); if (ctl->free_space < minlen) { + block_group->discard_cursor = + btrfs_block_group_end(block_group); spin_unlock(&ctl->tree_lock); mutex_unlock(&ctl->cache_writeout_mutex); break; } entry = tree_search_offset(ctl, offset, 1, 0); - if (!entry || btrfs_free_space_trimmed(entry)) { + if (!entry || (async && start == offset && + btrfs_free_space_trimmed(entry))) { spin_unlock(&ctl->tree_lock); mutex_unlock(&ctl->cache_writeout_mutex); next_bitmap = true; @@ -3498,6 +3510,16 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, goto next; } + /* + * We already trimmed a region, but are using the locking above + * to reset the trim_state. 
+ */ + if (async && *total_trimmed) { + spin_unlock(&ctl->tree_lock); + mutex_unlock(&ctl->cache_writeout_mutex); + goto out; + } + bytes = min(bytes, end - start); if (bytes < minlen) { entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; @@ -3520,6 +3542,8 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, start, bytes, 0, &trim_entry); if (ret) { reset_trimming_bitmap(ctl, offset); + block_group->discard_cursor = + btrfs_block_group_end(block_group); break; } next: @@ -3529,6 +3553,7 @@ next: } else { start += bytes; } + block_group->discard_cursor = start; if (fatal_signal_pending(current)) { if (start != offset) @@ -3540,6 +3565,10 @@ next: cond_resched(); } + if (offset >= end) + block_group->discard_cursor = end; + +out: return ret; } @@ -3600,11 +3629,11 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group, btrfs_get_block_group_trimming(block_group); spin_unlock(&block_group->lock); - ret = trim_no_bitmap(block_group, trimmed, start, end, minlen); + ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false); if (ret) goto out; - ret = trim_bitmaps(block_group, trimmed, start, end, minlen); + ret = trim_bitmaps(block_group, trimmed, start, end, minlen, false); div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem); /* If we ended in the middle of a bitmap, reset the trimming flag */ if (rem) @@ -3614,6 +3643,50 @@ out: return ret; } +int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group, + u64 *trimmed, u64 start, u64 end, u64 minlen, + bool async) +{ + int ret; + + *trimmed = 0; + + spin_lock(&block_group->lock); + if (block_group->removed) { + spin_unlock(&block_group->lock); + return 0; + } + btrfs_get_block_group_trimming(block_group); + spin_unlock(&block_group->lock); + + ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async); + btrfs_put_block_group_trimming(block_group); + + return ret; +} + +int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group, + u64 *trimmed, u64 start, u64 end, u64 minlen, + bool async) +{ + int ret; + + *trimmed = 0; + + spin_lock(&block_group->lock); + if (block_group->removed) { + spin_unlock(&block_group->lock); + return 0; + } + btrfs_get_block_group_trimming(block_group); + spin_unlock(&block_group->lock); + + ret = trim_bitmaps(block_group, trimmed, start, end, minlen, async); + btrfs_put_block_group_trimming(block_group); + + return ret; +} + /* * Find the left-most item in the cache tree, and then return the * smallest inode number in the item. diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index f799eb491410..765c5cbd99a5 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -138,6 +138,12 @@ int btrfs_return_cluster_to_free_space( struct btrfs_free_cluster *cluster); int btrfs_trim_block_group(struct btrfs_block_group *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen); +int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group, + u64 *trimmed, u64 start, u64 end, u64 minlen, + bool async); +int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group, + u64 *trimmed, u64 start, u64 end, u64 minlen, + bool async); /* Support functions for running our sanity tests */ #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS -- cgit v1.2.3-59-g8ed1b From 71e8978eb456958784539f02fa56087300f3a993 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:17 -0800 Subject: btrfs: sysfs: add removal calls for debug/ We probably should call sysfs_remove_group() on debug/. 
Reviewed-by: Josef Bacik Reviewed-by: Anand Jain Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 16379f491ca1..4c022757ffa4 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -771,6 +771,10 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info) kobject_del(fs_info->space_info_kobj); kobject_put(fs_info->space_info_kobj); } +#ifdef CONFIG_BTRFS_DEBUG + sysfs_remove_group(&fs_info->fs_devices->fsid_kobj, + &btrfs_debug_feature_attr_group); +#endif addrm_unknown_feature_attrs(fs_info, false); sysfs_remove_group(&fs_info->fs_devices->fsid_kobj, &btrfs_feature_attr_group); sysfs_remove_files(&fs_info->fs_devices->fsid_kobj, btrfs_attrs); @@ -1205,6 +1209,9 @@ void __cold btrfs_exit_sysfs(void) sysfs_unmerge_group(&btrfs_kset->kobj, &btrfs_static_feature_attr_group); sysfs_remove_group(&btrfs_kset->kobj, &btrfs_feature_attr_group); +#ifdef CONFIG_BTRFS_DEBUG + sysfs_remove_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group); +#endif kset_unregister(btrfs_kset); } -- cgit v1.2.3-59-g8ed1b From 93945cb43ead1e22e0d5ab50ae361a62cb783ab2 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:18 -0800 Subject: btrfs: sysfs: make UUID/debug have its own kobject Btrfs only allowed attributes to be exposed in debug/. Let's let other groups be created by making debug its own kobject. This also makes the per-fs debug options separate from the global features mount attributes. This seems to be needed as sysfs_create_files() requires const struct attribute * while sysfs_create_group() can take struct attribute *. This seems nicer as per file system, you'll probably use to_fs_info(). 
Reviewed-by: Josef Bacik Reviewed-by: Anand Jain Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 4 ++++ fs/btrfs/sysfs.c | 20 ++++++++++++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d15a4aa721aa..0626e5562993 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -927,6 +927,10 @@ struct btrfs_fs_info { spinlock_t ref_verify_lock; struct rb_root block_tree; #endif + +#ifdef CONFIG_BTRFS_DEBUG + struct kobject *debug_kobj; +#endif }; static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 4c022757ffa4..5a09c7a14124 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -344,6 +344,10 @@ static const struct attribute_group btrfs_static_feature_attr_group = { * /sys/fs/btrfs/debug - applies to module or all filesystems * /sys/fs/btrfs/UUID - applies only to the given filesystem */ +static const struct attribute *btrfs_debug_mount_attrs[] = { + NULL, +}; + static struct attribute *btrfs_debug_feature_attrs[] = { NULL }; @@ -772,8 +776,11 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info) kobject_put(fs_info->space_info_kobj); } #ifdef CONFIG_BTRFS_DEBUG - sysfs_remove_group(&fs_info->fs_devices->fsid_kobj, - &btrfs_debug_feature_attr_group); + if (fs_info->debug_kobj) { + sysfs_remove_files(fs_info->debug_kobj, btrfs_debug_mount_attrs); + kobject_del(fs_info->debug_kobj); + kobject_put(fs_info->debug_kobj); + } #endif addrm_unknown_feature_attrs(fs_info, false); sysfs_remove_group(&fs_info->fs_devices->fsid_kobj, &btrfs_feature_attr_group); @@ -1111,8 +1118,13 @@ int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info) goto failure; #ifdef CONFIG_BTRFS_DEBUG - error = sysfs_create_group(fsid_kobj, - &btrfs_debug_feature_attr_group); + fs_info->debug_kobj = kobject_create_and_add("debug", fsid_kobj); + if (!fs_info->debug_kobj) { + error = -ENOMEM; + goto failure; + } + + error = sysfs_create_files(fs_info->debug_kobj, btrfs_debug_mount_attrs); if (error) goto failure; #endif -- cgit v1.2.3-59-g8ed1b From e4faab844a55edb9b628bf1f982fbc30f07b9700 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:19 -0800 Subject: btrfs: sysfs: add UUID/debug/discard directory Setup base sysfs directory for discard stats + tunables. 
Reviewed-by: Josef Bacik Reviewed-by: Anand Jain Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/sysfs.c | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0626e5562993..71317047c321 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -930,6 +930,7 @@ struct btrfs_fs_info { #ifdef CONFIG_BTRFS_DEBUG struct kobject *debug_kobj; + struct kobject *discard_debug_kobj; #endif }; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 5a09c7a14124..0eec1a8d45bd 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -338,6 +338,13 @@ static const struct attribute_group btrfs_static_feature_attr_group = { #ifdef CONFIG_BTRFS_DEBUG +/* + * Discard statistics and tunables + */ +static const struct attribute *discard_debug_attrs[] = { + NULL, +}; + /* * Runtime debugging exported via sysfs * @@ -776,6 +783,12 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info) kobject_put(fs_info->space_info_kobj); } #ifdef CONFIG_BTRFS_DEBUG + if (fs_info->discard_debug_kobj) { + sysfs_remove_files(fs_info->discard_debug_kobj, + discard_debug_attrs); + kobject_del(fs_info->discard_debug_kobj); + kobject_put(fs_info->discard_debug_kobj); + } if (fs_info->debug_kobj) { sysfs_remove_files(fs_info->debug_kobj, btrfs_debug_mount_attrs); kobject_del(fs_info->debug_kobj); @@ -1127,6 +1140,19 @@ int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info) error = sysfs_create_files(fs_info->debug_kobj, btrfs_debug_mount_attrs); if (error) goto failure; + + /* Discard directory */ + fs_info->discard_debug_kobj = kobject_create_and_add("discard", + fs_info->debug_kobj); + if (!fs_info->discard_debug_kobj) { + error = -ENOMEM; + goto failure; + } + + error = sysfs_create_files(fs_info->discard_debug_kobj, + discard_debug_attrs); + if (error) + goto failure; #endif error = addrm_unknown_feature_attrs(fs_info, true); -- cgit v1.2.3-59-g8ed1b From dfb79ddb130e0a239e3e90aaf5f5b908555f52bb Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:20 -0800 Subject: btrfs: track discardable extents for async discard The number of discardable extents will serve as the rate limiting metric for how often we should discard. This keeps track of discardable extents in the free space caches by maintaining deltas and propagating them to the global count. The deltas are calculated from 2 values stored in PREV and CURR entries, then propagated up to the global discard ctl. The current counter value becomes the previous counter value after update. Signed-off-by: Dennis Zhou Reviewed-by: David Sterba [ update changelog ] Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 9 ++++ fs/btrfs/discard.c | 31 +++++++++++++ fs/btrfs/discard.h | 4 ++ fs/btrfs/free-space-cache.c | 106 ++++++++++++++++++++++++++++++++++++++++---- fs/btrfs/free-space-cache.h | 2 + fs/btrfs/sysfs.c | 15 +++++++ 6 files changed, 158 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 71317047c321..533c5ea0e8c0 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -101,6 +101,14 @@ struct btrfs_ref; #define BTRFS_MAX_EXTENT_SIZE SZ_128M +/* + * Deltas are an effective way to populate global statistics. Give macro names + * to make it clear what we're doing. An example is discard_extents in + * btrfs_free_space_ctl. 
+ */ +#define BTRFS_STAT_NR_ENTRIES 2 +#define BTRFS_STAT_CURR 0 +#define BTRFS_STAT_PREV 1 /* * Count how many BTRFS_MAX_EXTENT_SIZE cover the @size @@ -458,6 +466,7 @@ struct btrfs_discard_ctl { spinlock_t lock; struct btrfs_block_group *block_group; struct list_head discard_list[BTRFS_NR_DISCARD_LISTS]; + atomic_t discardable_extents; }; /* delayed seq elem */ diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index cb9a23217ec6..99663641abf4 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -342,6 +342,35 @@ bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl) test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags)); } +/** + * btrfs_discard_update_discardable - propagate discard counters + * @block_group: block_group of interest + * @ctl: free_space_ctl of @block_group + * + * This propagates deltas of counters up to the discard_ctl. It maintains a + * current counter and a previous counter passing the delta up to the global + * stat. Then the current counter value becomes the previous counter value. + */ +void btrfs_discard_update_discardable(struct btrfs_block_group *block_group, + struct btrfs_free_space_ctl *ctl) +{ + struct btrfs_discard_ctl *discard_ctl; + s32 extents_delta; + + if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) + return; + + discard_ctl = &block_group->fs_info->discard_ctl; + + extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] - + ctl->discardable_extents[BTRFS_STAT_PREV]; + if (extents_delta) { + atomic_add(extents_delta, &discard_ctl->discardable_extents); + ctl->discardable_extents[BTRFS_STAT_PREV] = + ctl->discardable_extents[BTRFS_STAT_CURR]; + } +} + /** * btrfs_discard_punt_unused_bgs_list - punt unused_bgs list to discard lists * @fs_info: fs_info of interest @@ -423,6 +452,8 @@ void btrfs_discard_init(struct btrfs_fs_info *fs_info) for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) INIT_LIST_HEAD(&discard_ctl->discard_list[i]); + + atomic_set(&discard_ctl->discardable_extents, 0); } void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h index 3c5a04f8714f..0f2f89b1b0b9 100644 --- a/fs/btrfs/discard.h +++ b/fs/btrfs/discard.h @@ -16,6 +16,10 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, bool override); bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl); +/* Update operations */ +void btrfs_discard_update_discardable(struct btrfs_block_group *block_group, + struct btrfs_free_space_ctl *ctl); + /* Setup/cleanup operations */ void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info); void btrfs_discard_resume(struct btrfs_fs_info *fs_info); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 2e8fbd67ec9b..ed97d082feac 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -32,6 +32,8 @@ struct btrfs_trim_range { struct list_head list; }; +static int count_bitmap_extents(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *bitmap_info); static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info); static void unlink_free_space(struct btrfs_free_space_ctl *ctl, @@ -816,12 +818,17 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, ret = io_ctl_read_bitmap(&io_ctl, e); if (ret) goto free_cache; + e->bitmap_extents = count_bitmap_extents(ctl, e); + if (!btrfs_free_space_trimmed(e)) + ctl->discardable_extents[BTRFS_STAT_CURR] += + e->bitmap_extents; } io_ctl_drop_pages(&io_ctl); 
merge_space_tree(ctl); ret = 1; out: + btrfs_discard_update_discardable(ctl->private, ctl); io_ctl_free(&io_ctl); return ret; free_cache: @@ -1635,6 +1642,9 @@ __unlink_free_space(struct btrfs_free_space_ctl *ctl, { rb_erase(&info->offset_index, &ctl->free_space_offset); ctl->free_extents--; + + if (!info->bitmap && !btrfs_free_space_trimmed(info)) + ctl->discardable_extents[BTRFS_STAT_CURR]--; } static void unlink_free_space(struct btrfs_free_space_ctl *ctl, @@ -1655,6 +1665,9 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl, if (ret) return ret; + if (!info->bitmap && !btrfs_free_space_trimmed(info)) + ctl->discardable_extents[BTRFS_STAT_CURR]++; + ctl->free_space += info->bytes; ctl->free_extents++; return ret; @@ -1711,17 +1724,29 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, u64 bytes) { - unsigned long start, count; + unsigned long start, count, end; + int extent_delta = -1; start = offset_to_bit(info->offset, ctl->unit, offset); count = bytes_to_bits(bytes, ctl->unit); - ASSERT(start + count <= BITS_PER_BITMAP); + end = start + count; + ASSERT(end <= BITS_PER_BITMAP); bitmap_clear(info->bitmap, start, count); info->bytes -= bytes; if (info->max_extent_size > ctl->unit) info->max_extent_size = 0; + + if (start && test_bit(start - 1, info->bitmap)) + extent_delta++; + + if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) + extent_delta++; + + info->bitmap_extents += extent_delta; + if (!btrfs_free_space_trimmed(info)) + ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; } static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, @@ -1736,16 +1761,28 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, u64 bytes) { - unsigned long start, count; + unsigned long start, count, end; + int extent_delta = 1; start = offset_to_bit(info->offset, ctl->unit, offset); count = bytes_to_bits(bytes, ctl->unit); - ASSERT(start + count <= BITS_PER_BITMAP); + end = start + count; + ASSERT(end <= BITS_PER_BITMAP); bitmap_set(info->bitmap, start, count); info->bytes += bytes; ctl->free_space += bytes; + + if (start && test_bit(start - 1, info->bitmap)) + extent_delta--; + + if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) + extent_delta--; + + info->bitmap_extents += extent_delta; + if (!btrfs_free_space_trimmed(info)) + ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; } /* @@ -1881,11 +1918,35 @@ out: return NULL; } +static int count_bitmap_extents(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *bitmap_info) +{ + struct btrfs_block_group *block_group = ctl->private; + u64 bytes = bitmap_info->bytes; + unsigned int rs, re; + int count = 0; + + if (!block_group || !bytes) + return count; + + bitmap_for_each_set_region(bitmap_info->bitmap, rs, re, 0, + BITS_PER_BITMAP) { + bytes -= (rs - re) * ctl->unit; + count++; + + if (!bytes) + break; + } + + return count; +} + static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset) { info->offset = offset_to_bitmap(ctl, offset); info->bytes = 0; + info->bitmap_extents = 0; INIT_LIST_HEAD(&info->list); link_free_space(ctl, info); ctl->total_bitmaps++; @@ -1991,8 +2052,12 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, * This is a tradeoff to make bitmap trim state minimal. We mark the * whole bitmap untrimmed if at any point we add untrimmed regions. 
*/ - if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) + if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) { + if (btrfs_free_space_trimmed(info)) + ctl->discardable_extents[BTRFS_STAT_CURR] += + info->bitmap_extents; info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; + } end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); @@ -2428,6 +2493,7 @@ link: if (ret) kmem_cache_free(btrfs_free_space_cachep, info); out: + btrfs_discard_update_discardable(block_group, ctl); spin_unlock(&ctl->tree_lock); if (ret) { @@ -2560,6 +2626,7 @@ again: goto again; } out_lock: + btrfs_discard_update_discardable(block_group, ctl); spin_unlock(&ctl->tree_lock); out: return ret; @@ -2645,8 +2712,16 @@ __btrfs_return_cluster_to_free_space( bitmap = (entry->bitmap != NULL); if (!bitmap) { + /* Merging treats extents as if they were new */ + if (!btrfs_free_space_trimmed(entry)) + ctl->discardable_extents[BTRFS_STAT_CURR]--; + try_merge_free_space(ctl, entry, false); steal_from_bitmap(ctl, entry, false); + + /* As we insert directly, update these statistics */ + if (!btrfs_free_space_trimmed(entry)) + ctl->discardable_extents[BTRFS_STAT_CURR]++; } tree_insert_offset(&ctl->free_space_offset, entry->offset, &entry->offset_index, bitmap); @@ -2703,6 +2778,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group) cond_resched_lock(&ctl->tree_lock); } __btrfs_remove_free_space_cache_locked(ctl); + btrfs_discard_update_discardable(block_group, ctl); spin_unlock(&ctl->tree_lock); } @@ -2777,6 +2853,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, link_free_space(ctl, entry); } out: + btrfs_discard_update_discardable(block_group, ctl); spin_unlock(&ctl->tree_lock); if (align_gap_len) @@ -2941,6 +3018,8 @@ out: entry->bitmap); ctl->total_bitmaps--; ctl->op->recalc_thresholds(ctl); + } else if (!btrfs_free_space_trimmed(entry)) { + ctl->discardable_extents[BTRFS_STAT_CURR]--; } kmem_cache_free(btrfs_free_space_cachep, entry); } @@ -3435,15 +3514,24 @@ static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset) spin_lock(&ctl->tree_lock); entry = tree_search_offset(ctl, offset, 1, 0); - if (entry) + if (entry) { + if (btrfs_free_space_trimmed(entry)) + ctl->discardable_extents[BTRFS_STAT_CURR] += + entry->bitmap_extents; entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; + } + spin_unlock(&ctl->tree_lock); } -static void end_trimming_bitmap(struct btrfs_free_space *entry) +static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *entry) { - if (btrfs_free_space_trimming_bitmap(entry)) + if (btrfs_free_space_trimming_bitmap(entry)) { entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; + ctl->discardable_extents[BTRFS_STAT_CURR] -= + entry->bitmap_extents; + } } /* @@ -3501,7 +3589,7 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, * if BTRFS_TRIM_STATE_TRIMMED is set on a bitmap. 
*/ if (ret2 && !minlen) - end_trimming_bitmap(entry); + end_trimming_bitmap(ctl, entry); else entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; spin_unlock(&ctl->tree_lock); diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 765c5cbd99a5..bd1e7a542271 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -28,6 +28,7 @@ struct btrfs_free_space { unsigned long *bitmap; struct list_head list; enum btrfs_trim_state trim_state; + s32 bitmap_extents; }; static inline bool btrfs_free_space_trimmed(struct btrfs_free_space *info) @@ -50,6 +51,7 @@ struct btrfs_free_space_ctl { int total_bitmaps; int unit; u64 start; + s32 discardable_extents[BTRFS_STAT_NR_ENTRIES]; const struct btrfs_free_space_op *op; void *private; struct mutex cache_writeout_mutex; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 0eec1a8d45bd..0ac122c67065 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -12,6 +12,7 @@ #include #include "ctree.h" +#include "discard.h" #include "disk-io.h" #include "transaction.h" #include "sysfs.h" @@ -341,7 +342,21 @@ static const struct attribute_group btrfs_static_feature_attr_group = { /* * Discard statistics and tunables */ +#define discard_to_fs_info(_kobj) to_fs_info((_kobj)->parent->parent) + +static ssize_t btrfs_discardable_extents_show(struct kobject *kobj, + struct kobj_attribute *a, + char *buf) +{ + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); + + return snprintf(buf, PAGE_SIZE, "%d\n", + atomic_read(&fs_info->discard_ctl.discardable_extents)); +} +BTRFS_ATTR(discard, discardable_extents, btrfs_discardable_extents_show); + static const struct attribute *discard_debug_attrs[] = { + BTRFS_ATTR_PTR(discard, discardable_extents), NULL, }; -- cgit v1.2.3-59-g8ed1b From 5dc7c10b87474c98116d3438739743cd77263e9f Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Fri, 13 Dec 2019 16:22:21 -0800 Subject: btrfs: keep track of discardable_bytes for async discard Keep track of this metric so that we can understand how ahead or behind we are in discarding rate. This uses the same accounting method as discardable_extents, deltas between previous/current values and propagating them up. 
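As a rough illustration of that previous/current delta scheme, here is a minimal user space sketch, not the kernel implementation: the plain global variable stands in for the atomic64 in btrfs_discard_ctl, locking is omitted, and the names (ctl_model, update_discardable) are made up for illustration.

#include <stdint.h>
#include <stdio.h>

enum { STAT_CURR = 0, STAT_PREV = 1, STAT_NR_ENTRIES = 2 };

/* Stands in for the global atomic64 counter in btrfs_discard_ctl. */
static int64_t global_discardable_bytes;

/* Stands in for the per block group btrfs_free_space_ctl counters. */
struct ctl_model {
	int64_t discardable_bytes[STAT_NR_ENTRIES];
};

/*
 * Push only the delta since the last propagation up to the global counter,
 * then make the current value the new previous value (the same idea as
 * btrfs_discard_update_discardable() in the diff below).
 */
static void update_discardable(struct ctl_model *ctl)
{
	int64_t delta = ctl->discardable_bytes[STAT_CURR] -
			ctl->discardable_bytes[STAT_PREV];

	if (delta) {
		global_discardable_bytes += delta;
		ctl->discardable_bytes[STAT_PREV] =
			ctl->discardable_bytes[STAT_CURR];
	}
}

int main(void)
{
	struct ctl_model ctl = { { 0, 0 } };

	ctl.discardable_bytes[STAT_CURR] += 8192;	/* untrimmed space added */
	update_discardable(&ctl);

	ctl.discardable_bytes[STAT_CURR] -= 4096;	/* space trimmed or reused */
	update_discardable(&ctl);

	printf("global discardable bytes: %lld\n",
	       (long long)global_discardable_bytes);	/* prints 4096 */
	return 0;
}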
Signed-off-by: Dennis Zhou Reviewed-by: David Sterba [ update changelog ] Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/discard.c | 10 ++++++++++ fs/btrfs/free-space-cache.c | 41 ++++++++++++++++++++++++++++++++--------- fs/btrfs/free-space-cache.h | 1 + fs/btrfs/sysfs.c | 12 ++++++++++++ 5 files changed, 56 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 533c5ea0e8c0..7c1c236d13ae 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -467,6 +467,7 @@ struct btrfs_discard_ctl { struct btrfs_block_group *block_group; struct list_head discard_list[BTRFS_NR_DISCARD_LISTS]; atomic_t discardable_extents; + atomic64_t discardable_bytes; }; /* delayed seq elem */ diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 99663641abf4..62298a438fa5 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -356,6 +356,7 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group, { struct btrfs_discard_ctl *discard_ctl; s32 extents_delta; + s64 bytes_delta; if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) return; @@ -369,6 +370,14 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group, ctl->discardable_extents[BTRFS_STAT_PREV] = ctl->discardable_extents[BTRFS_STAT_CURR]; } + + bytes_delta = ctl->discardable_bytes[BTRFS_STAT_CURR] - + ctl->discardable_bytes[BTRFS_STAT_PREV]; + if (bytes_delta) { + atomic64_add(bytes_delta, &discard_ctl->discardable_bytes); + ctl->discardable_bytes[BTRFS_STAT_PREV] = + ctl->discardable_bytes[BTRFS_STAT_CURR]; + } } /** @@ -454,6 +463,7 @@ void btrfs_discard_init(struct btrfs_fs_info *fs_info) INIT_LIST_HEAD(&discard_ctl->discard_list[i]); atomic_set(&discard_ctl->discardable_extents, 0); + atomic64_set(&discard_ctl->discardable_bytes, 0); } void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ed97d082feac..40fb918a82f4 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -819,9 +819,11 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, if (ret) goto free_cache; e->bitmap_extents = count_bitmap_extents(ctl, e); - if (!btrfs_free_space_trimmed(e)) + if (!btrfs_free_space_trimmed(e)) { ctl->discardable_extents[BTRFS_STAT_CURR] += e->bitmap_extents; + ctl->discardable_bytes[BTRFS_STAT_CURR] += e->bytes; + } } io_ctl_drop_pages(&io_ctl); @@ -1643,8 +1645,10 @@ __unlink_free_space(struct btrfs_free_space_ctl *ctl, rb_erase(&info->offset_index, &ctl->free_space_offset); ctl->free_extents--; - if (!info->bitmap && !btrfs_free_space_trimmed(info)) + if (!info->bitmap && !btrfs_free_space_trimmed(info)) { ctl->discardable_extents[BTRFS_STAT_CURR]--; + ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes; + } } static void unlink_free_space(struct btrfs_free_space_ctl *ctl, @@ -1665,8 +1669,10 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl, if (ret) return ret; - if (!info->bitmap && !btrfs_free_space_trimmed(info)) + if (!info->bitmap && !btrfs_free_space_trimmed(info)) { ctl->discardable_extents[BTRFS_STAT_CURR]++; + ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; + } ctl->free_space += info->bytes; ctl->free_extents++; @@ -1745,8 +1751,10 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, extent_delta++; info->bitmap_extents += extent_delta; - if (!btrfs_free_space_trimmed(info)) + if (!btrfs_free_space_trimmed(info)) { ctl->discardable_extents[BTRFS_STAT_CURR] 
+= extent_delta; + ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; + } } static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, @@ -1781,8 +1789,10 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, extent_delta--; info->bitmap_extents += extent_delta; - if (!btrfs_free_space_trimmed(info)) + if (!btrfs_free_space_trimmed(info)) { ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; + ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes; + } } /* @@ -2053,9 +2063,11 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, * whole bitmap untrimmed if at any point we add untrimmed regions. */ if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) { - if (btrfs_free_space_trimmed(info)) + if (btrfs_free_space_trimmed(info)) { ctl->discardable_extents[BTRFS_STAT_CURR] += info->bitmap_extents; + ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; + } info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; } @@ -2713,15 +2725,21 @@ __btrfs_return_cluster_to_free_space( bitmap = (entry->bitmap != NULL); if (!bitmap) { /* Merging treats extents as if they were new */ - if (!btrfs_free_space_trimmed(entry)) + if (!btrfs_free_space_trimmed(entry)) { ctl->discardable_extents[BTRFS_STAT_CURR]--; + ctl->discardable_bytes[BTRFS_STAT_CURR] -= + entry->bytes; + } try_merge_free_space(ctl, entry, false); steal_from_bitmap(ctl, entry, false); /* As we insert directly, update these statistics */ - if (!btrfs_free_space_trimmed(entry)) + if (!btrfs_free_space_trimmed(entry)) { ctl->discardable_extents[BTRFS_STAT_CURR]++; + ctl->discardable_bytes[BTRFS_STAT_CURR] += + entry->bytes; + } } tree_insert_offset(&ctl->free_space_offset, entry->offset, &entry->offset_index, bitmap); @@ -3011,6 +3029,8 @@ out: spin_lock(&ctl->tree_lock); ctl->free_space -= bytes; + if (!entry->bitmap && !btrfs_free_space_trimmed(entry)) + ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; if (entry->bytes == 0) { ctl->free_extents--; if (entry->bitmap) { @@ -3515,9 +3535,11 @@ static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset) spin_lock(&ctl->tree_lock); entry = tree_search_offset(ctl, offset, 1, 0); if (entry) { - if (btrfs_free_space_trimmed(entry)) + if (btrfs_free_space_trimmed(entry)) { ctl->discardable_extents[BTRFS_STAT_CURR] += entry->bitmap_extents; + ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes; + } entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; } @@ -3531,6 +3553,7 @@ static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl, entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; ctl->discardable_extents[BTRFS_STAT_CURR] -= entry->bitmap_extents; + ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes; } } diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index bd1e7a542271..5018190a49a3 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -52,6 +52,7 @@ struct btrfs_free_space_ctl { int unit; u64 start; s32 discardable_extents[BTRFS_STAT_NR_ENTRIES]; + s64 discardable_bytes[BTRFS_STAT_NR_ENTRIES]; const struct btrfs_free_space_op *op; void *private; struct mutex cache_writeout_mutex; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 0ac122c67065..ce1da3fcd6c9 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -344,6 +344,17 @@ static const struct attribute_group btrfs_static_feature_attr_group = { */ #define discard_to_fs_info(_kobj) to_fs_info((_kobj)->parent->parent) +static ssize_t btrfs_discardable_bytes_show(struct kobject *kobj, + struct kobj_attribute *a, + char *buf) +{ + struct btrfs_fs_info 
*fs_info = discard_to_fs_info(kobj); + + return snprintf(buf, PAGE_SIZE, "%lld\n", + atomic64_read(&fs_info->discard_ctl.discardable_bytes)); +} +BTRFS_ATTR(discard, discardable_bytes, btrfs_discardable_bytes_show); + static ssize_t btrfs_discardable_extents_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) @@ -356,6 +367,7 @@ static ssize_t btrfs_discardable_extents_show(struct kobject *kobj, BTRFS_ATTR(discard, discardable_extents, btrfs_discardable_extents_show); static const struct attribute *discard_debug_attrs[] = { + BTRFS_ATTR_PTR(discard, discardable_bytes), BTRFS_ATTR_PTR(discard, discardable_extents), NULL, }; -- cgit v1.2.3-59-g8ed1b From a2309300841207de28307ecd2f0e031fccde37a3 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:35 -0500 Subject: btrfs: calculate discard delay based on number of extents An earlier patch keeps track of discardable_extents. These are undiscarded extents managed by the free space cache. Here, we will use this to dynamically calculate the discard delay interval. There are 3 rate to consider. The first is the target convergence rate, the rate to discard all discardable_extents over the BTRFS_DISCARD_TARGET_MSEC time frame. This is clamped by the lower limit, the iops limit or BTRFS_DISCARD_MIN_DELAY (1ms), and the upper limit, BTRFS_DISCARD_MAX_DELAY (1s). We reevaluate this delay every transaction commit. Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 ++ fs/btrfs/discard.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++---- fs/btrfs/discard.h | 1 + fs/btrfs/extent-tree.c | 4 +++- fs/btrfs/sysfs.c | 31 ++++++++++++++++++++++++++++ 5 files changed, 88 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 7c1c236d13ae..451dade0a4bb 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -468,6 +468,8 @@ struct btrfs_discard_ctl { struct list_head discard_list[BTRFS_NR_DISCARD_LISTS]; atomic_t discardable_extents; atomic64_t discardable_bytes; + unsigned long delay; + u32 iops_limit; }; /* delayed seq elem */ diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 62298a438fa5..29645676427b 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -15,6 +15,12 @@ #define BTRFS_DISCARD_DELAY (120ULL * NSEC_PER_SEC) #define BTRFS_DISCARD_UNUSED_DELAY (10ULL * NSEC_PER_SEC) +/* Target completion latency of discarding all discardable extents */ +#define BTRFS_DISCARD_TARGET_MSEC (6 * 60 * 60UL * MSEC_PER_SEC) +#define BTRFS_DISCARD_MIN_DELAY_MSEC (1UL) +#define BTRFS_DISCARD_MAX_DELAY_MSEC (1000UL) +#define BTRFS_DISCARD_MAX_IOPS (10U) + static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl, struct btrfs_block_group *block_group) { @@ -235,11 +241,17 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, block_group = find_next_block_group(discard_ctl, now); if (block_group) { - u64 delay = 0; + unsigned long delay = discard_ctl->delay; + + /* + * This timeout is to hopefully prevent immediate discarding + * in a recently allocated block group. 
+ */ + if (now < block_group->discard_eligible_time) { + u64 bg_timeout = block_group->discard_eligible_time - now; - if (now < block_group->discard_eligible_time) - delay = nsecs_to_jiffies( - block_group->discard_eligible_time - now); + delay = max(delay, nsecs_to_jiffies(bg_timeout)); + } mod_delayed_work(discard_ctl->discard_workers, &discard_ctl->work, delay); @@ -342,6 +354,39 @@ bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl) test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags)); } +/** + * btrfs_discard_calc_delay - recalculate the base delay + * @discard_ctl: discard control + * + * Recalculate the base delay which is based off the total number of + * discardable_extents. Clamp this between the lower_limit (iops_limit or 1ms) + * and the upper_limit (BTRFS_DISCARD_MAX_DELAY_MSEC). + */ +void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl) +{ + s32 discardable_extents; + u32 iops_limit; + unsigned long delay; + unsigned long lower_limit = BTRFS_DISCARD_MIN_DELAY_MSEC; + + discardable_extents = atomic_read(&discard_ctl->discardable_extents); + if (!discardable_extents) + return; + + spin_lock(&discard_ctl->lock); + + iops_limit = READ_ONCE(discard_ctl->iops_limit); + if (iops_limit) + lower_limit = max_t(unsigned long, lower_limit, + MSEC_PER_SEC / iops_limit); + + delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents; + delay = clamp(delay, lower_limit, BTRFS_DISCARD_MAX_DELAY_MSEC); + discard_ctl->delay = msecs_to_jiffies(delay); + + spin_unlock(&discard_ctl->lock); +} + /** * btrfs_discard_update_discardable - propagate discard counters * @block_group: block_group of interest @@ -464,6 +509,8 @@ void btrfs_discard_init(struct btrfs_fs_info *fs_info) atomic_set(&discard_ctl->discardable_extents, 0); atomic64_set(&discard_ctl->discardable_bytes, 0); + discard_ctl->delay = BTRFS_DISCARD_MAX_DELAY_MSEC; + discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS; } void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h index 0f2f89b1b0b9..5250fe178e49 100644 --- a/fs/btrfs/discard.h +++ b/fs/btrfs/discard.h @@ -17,6 +17,7 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl); /* Update operations */ +void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl); void btrfs_discard_update_discardable(struct btrfs_block_group *block_group, struct btrfs_free_space_ctl *ctl); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 2c12366cfde5..0163fdd59f8f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2935,8 +2935,10 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) cond_resched(); } - if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) + if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { + btrfs_discard_calc_delay(&fs_info->discard_ctl); btrfs_discard_schedule_work(&fs_info->discard_ctl, true); + } /* * Transaction is finished. We don't need the lock anymore. 
We diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index ce1da3fcd6c9..4155174245ff 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -366,9 +366,40 @@ static ssize_t btrfs_discardable_extents_show(struct kobject *kobj, } BTRFS_ATTR(discard, discardable_extents, btrfs_discardable_extents_show); +static ssize_t btrfs_discard_iops_limit_show(struct kobject *kobj, + struct kobj_attribute *a, + char *buf) +{ + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); + + return snprintf(buf, PAGE_SIZE, "%u\n", + READ_ONCE(fs_info->discard_ctl.iops_limit)); +} + +static ssize_t btrfs_discard_iops_limit_store(struct kobject *kobj, + struct kobj_attribute *a, + const char *buf, size_t len) +{ + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); + struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl; + u32 iops_limit; + int ret; + + ret = kstrtou32(buf, 10, &iops_limit); + if (ret) + return -EINVAL; + + WRITE_ONCE(discard_ctl->iops_limit, iops_limit); + + return len; +} +BTRFS_ATTR_RW(discard, iops_limit, btrfs_discard_iops_limit_show, + btrfs_discard_iops_limit_store); + static const struct attribute *discard_debug_attrs[] = { BTRFS_ATTR_PTR(discard, discardable_bytes), BTRFS_ATTR_PTR(discard, discardable_extents), + BTRFS_ATTR_PTR(discard, iops_limit), NULL, }; -- cgit v1.2.3-59-g8ed1b From e93591bb6ecf3e31c8f5366eac143f4f9c270915 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:36 -0500 Subject: btrfs: add kbps discard rate limit for async discard Provide the ability to rate limit based on kbps in addition to iops as additional guides for the target discard rate. The delay used ends up being max(kbps_delay, iops_delay). Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 ++ fs/btrfs/discard.c | 23 +++++++++++++++++++++-- fs/btrfs/sysfs.c | 31 +++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 451dade0a4bb..f58b1cfeeca6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -466,10 +466,12 @@ struct btrfs_discard_ctl { spinlock_t lock; struct btrfs_block_group *block_group; struct list_head discard_list[BTRFS_NR_DISCARD_LISTS]; + u64 prev_discard; atomic_t discardable_extents; atomic64_t discardable_bytes; unsigned long delay; u32 iops_limit; + u32 kbps_limit; }; /* delayed seq elem */ diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 29645676427b..565931f9a9aa 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include "ctree.h" @@ -222,8 +223,8 @@ void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl, * @override: override the current timer * * Discards are issued by a delayed workqueue item. @override is used to - * update the current delay as the baseline delay interview is reevaluated - * on transaction commit. This is also maxed with any other rate limit. + * update the current delay as the baseline delay interval is reevaluated on + * transaction commit. This is also maxed with any other rate limit. 
*/ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, bool override) @@ -242,6 +243,20 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, block_group = find_next_block_group(discard_ctl, now); if (block_group) { unsigned long delay = discard_ctl->delay; + u32 kbps_limit = READ_ONCE(discard_ctl->kbps_limit); + + /* + * A single delayed workqueue item is responsible for + * discarding, so we can manage the bytes rate limit by keeping + * track of the previous discard. + */ + if (kbps_limit && discard_ctl->prev_discard) { + u64 bps_limit = ((u64)kbps_limit) * SZ_1K; + u64 bps_delay = div64_u64(discard_ctl->prev_discard * + MSEC_PER_SEC, bps_limit); + + delay = max(delay, msecs_to_jiffies(bps_delay)); + } /* * This timeout is to hopefully prevent immediate discarding @@ -316,6 +331,8 @@ static void btrfs_discard_workfn(struct work_struct *work) btrfs_block_group_end(block_group), 0, true); + discard_ctl->prev_discard = trimmed; + /* Determine next steps for a block_group */ if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) { if (discard_state == BTRFS_DISCARD_BITMAPS) { @@ -507,10 +524,12 @@ void btrfs_discard_init(struct btrfs_fs_info *fs_info) for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) INIT_LIST_HEAD(&discard_ctl->discard_list[i]); + discard_ctl->prev_discard = 0; atomic_set(&discard_ctl->discardable_extents, 0); atomic64_set(&discard_ctl->discardable_bytes, 0); discard_ctl->delay = BTRFS_DISCARD_MAX_DELAY_MSEC; discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS; + discard_ctl->kbps_limit = 0; } void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 4155174245ff..0168bc12875e 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -396,10 +396,41 @@ static ssize_t btrfs_discard_iops_limit_store(struct kobject *kobj, BTRFS_ATTR_RW(discard, iops_limit, btrfs_discard_iops_limit_show, btrfs_discard_iops_limit_store); +static ssize_t btrfs_discard_kbps_limit_show(struct kobject *kobj, + struct kobj_attribute *a, + char *buf) +{ + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); + + return snprintf(buf, PAGE_SIZE, "%u\n", + READ_ONCE(fs_info->discard_ctl.kbps_limit)); +} + +static ssize_t btrfs_discard_kbps_limit_store(struct kobject *kobj, + struct kobj_attribute *a, + const char *buf, size_t len) +{ + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); + struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl; + u32 kbps_limit; + int ret; + + ret = kstrtou32(buf, 10, &kbps_limit); + if (ret) + return -EINVAL; + + WRITE_ONCE(discard_ctl->kbps_limit, kbps_limit); + + return len; +} +BTRFS_ATTR_RW(discard, kbps_limit, btrfs_discard_kbps_limit_show, + btrfs_discard_kbps_limit_store); + static const struct attribute *discard_debug_attrs[] = { BTRFS_ATTR_PTR(discard, discardable_bytes), BTRFS_ATTR_PTR(discard, discardable_extents), BTRFS_ATTR_PTR(discard, iops_limit), + BTRFS_ATTR_PTR(discard, kbps_limit), NULL, }; -- cgit v1.2.3-59-g8ed1b From 4aa9ad520398bf9cef70fc9c363567da44312045 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:37 -0500 Subject: btrfs: limit max discard size for async discard Throttle the maximum size of a discard so that we can provide an upper bound for the rate of async discard. While the block layer is able to split discards into the appropriate sized discards, we want to be able to account more accurately the rate at which we are consuming NCQ slots as well as limit the upper bound of work for a discard. 
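As an aside (not part of the patch), the capping idea is easy to sketch in plain userspace C. The names free_extent, issue_discard() and trim_capped() below are hypothetical stand-ins, but the shape matches the description above: an oversized free extent is trimmed at most max_size bytes at a time and the remainder is kept for a later pass, so each work item does a bounded amount of discard I/O.

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for a free-space entry; sizes in bytes. */
    struct free_extent {
        uint64_t offset;
        uint64_t bytes;
    };

    /* Pretend to issue a discard for [offset, offset + bytes). */
    static void issue_discard(uint64_t offset, uint64_t bytes)
    {
        printf("discard: offset=%llu bytes=%llu\n",
               (unsigned long long)offset, (unsigned long long)bytes);
    }

    /*
     * Trim at most max_size bytes from the front of the extent and shrink
     * it in place.  The caller keeps any remainder around and comes back
     * to it on a later pass.
     */
    static uint64_t trim_capped(struct free_extent *e, uint64_t max_size)
    {
        uint64_t bytes = e->bytes;

        if (max_size && bytes > max_size)
            bytes = max_size;

        issue_discard(e->offset, bytes);
        e->offset += bytes;
        e->bytes -= bytes;
        return bytes;
    }

    int main(void)
    {
        struct free_extent e = { .offset = 0, .bytes = 200 * 1024 * 1024 };
        const uint64_t max_size = 64 * 1024 * 1024;  /* 64M cap, as above */

        /* Each iteration models one delayed work item's bounded discard. */
        while (e.bytes)
            trim_capped(&e, max_size);
        return 0;
    }

Running it prints four discards (three of 64M and one of 8M), mirroring how a 200M extent would be consumed one capped chunk per pass rather than in a single large request.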
Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/discard.h | 5 +++++ fs/btrfs/free-space-cache.c | 41 ++++++++++++++++++++++++++++++++--------- 2 files changed, 37 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h index 5250fe178e49..562c60fab77a 100644 --- a/fs/btrfs/discard.h +++ b/fs/btrfs/discard.h @@ -3,10 +3,15 @@ #ifndef BTRFS_DISCARD_H #define BTRFS_DISCARD_H +#include + struct btrfs_fs_info; struct btrfs_discard_ctl; struct btrfs_block_group; +/* Discard size limits */ +#define BTRFS_ASYNC_DISCARD_MAX_SIZE (SZ_64M) + /* Work operations */ void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl, struct btrfs_block_group *block_group); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 40fb918a82f4..438043aab6fb 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -3466,16 +3466,36 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group, extent_start = entry->offset; extent_bytes = entry->bytes; extent_trim_state = entry->trim_state; - start = max(start, extent_start); - bytes = min(extent_start + extent_bytes, end) - start; - if (bytes < minlen) { - spin_unlock(&ctl->tree_lock); - mutex_unlock(&ctl->cache_writeout_mutex); - goto next; - } + if (async) { + start = entry->offset; + bytes = entry->bytes; + if (bytes < minlen) { + spin_unlock(&ctl->tree_lock); + mutex_unlock(&ctl->cache_writeout_mutex); + goto next; + } + unlink_free_space(ctl, entry); + if (bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE) { + bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE; + extent_bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE; + entry->offset += BTRFS_ASYNC_DISCARD_MAX_SIZE; + entry->bytes -= BTRFS_ASYNC_DISCARD_MAX_SIZE; + link_free_space(ctl, entry); + } else { + kmem_cache_free(btrfs_free_space_cachep, entry); + } + } else { + start = max(start, extent_start); + bytes = min(extent_start + extent_bytes, end) - start; + if (bytes < minlen) { + spin_unlock(&ctl->tree_lock); + mutex_unlock(&ctl->cache_writeout_mutex); + goto next; + } - unlink_free_space(ctl, entry); - kmem_cache_free(btrfs_free_space_cachep, entry); + unlink_free_space(ctl, entry); + kmem_cache_free(btrfs_free_space_cachep, entry); + } spin_unlock(&ctl->tree_lock); trim_entry.start = extent_start; @@ -3639,6 +3659,9 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, goto next; } + if (async && bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE) + bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE; + bitmap_clear_bits(ctl, entry, start, bytes); if (entry->bytes == 0) free_bitmap(ctl, entry); -- cgit v1.2.3-59-g8ed1b From 19b2a2c71979f849cadc33af3577f739cc95e1f0 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:38 -0500 Subject: btrfs: make max async discard size tunable Expose max_discard_size as a tunable via sysfs and switch the current fixed maximum to the default value. 
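For readers unfamiliar with the show/store pattern used by these sysfs knobs, the following is a rough userspace analogue, not kernel code: a text buffer is parsed into a u64 and published with a relaxed atomic store so concurrent readers never observe a torn value. C11 atomics stand in for the kernel's READ_ONCE()/WRITE_ONCE(), and parse_u64() is a hypothetical, much-simplified kstrtou64().

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>
    #include <stdint.h>
    #include <stdatomic.h>

    /* Tunable shared between a "store" writer and "show" readers. */
    static _Atomic uint64_t max_discard_size = 64ULL * 1024 * 1024;

    /* Hypothetical stand-in for kstrtou64(): returns 0 on success. */
    static int parse_u64(const char *buf, uint64_t *out)
    {
        char *end;
        errno = 0;
        unsigned long long v = strtoull(buf, &end, 10);

        if (errno || end == buf || (*end != '\0' && *end != '\n'))
            return -EINVAL;
        *out = v;
        return 0;
    }

    /* "store": accept the new value only if it parses cleanly. */
    static int store_max_discard_size(const char *buf)
    {
        uint64_t val;

        if (parse_u64(buf, &val))
            return -EINVAL;
        atomic_store_explicit(&max_discard_size, val, memory_order_relaxed);
        return 0;
    }

    /* "show": readers always see a whole value, never a torn one. */
    static uint64_t show_max_discard_size(void)
    {
        return atomic_load_explicit(&max_discard_size, memory_order_relaxed);
    }

    int main(void)
    {
        printf("default: %llu\n",
               (unsigned long long)show_max_discard_size());
        if (store_max_discard_size("1048576\n") == 0)
            printf("updated: %llu\n",
                   (unsigned long long)show_max_discard_size());
        return 0;
    }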
Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/discard.c | 1 + fs/btrfs/discard.h | 2 +- fs/btrfs/free-space-cache.c | 20 +++++++++++++------- fs/btrfs/sysfs.c | 31 +++++++++++++++++++++++++++++++ 5 files changed, 47 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f58b1cfeeca6..fa6c95fdf5c6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -469,6 +469,7 @@ struct btrfs_discard_ctl { u64 prev_discard; atomic_t discardable_extents; atomic64_t discardable_bytes; + u64 max_discard_size; unsigned long delay; u32 iops_limit; u32 kbps_limit; diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 565931f9a9aa..5fdd82c356a4 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -527,6 +527,7 @@ void btrfs_discard_init(struct btrfs_fs_info *fs_info) discard_ctl->prev_discard = 0; atomic_set(&discard_ctl->discardable_extents, 0); atomic64_set(&discard_ctl->discardable_bytes, 0); + discard_ctl->max_discard_size = BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE; discard_ctl->delay = BTRFS_DISCARD_MAX_DELAY_MSEC; discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS; discard_ctl->kbps_limit = 0; diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h index 562c60fab77a..f9445ee5dcc3 100644 --- a/fs/btrfs/discard.h +++ b/fs/btrfs/discard.h @@ -10,7 +10,7 @@ struct btrfs_discard_ctl; struct btrfs_block_group; /* Discard size limits */ -#define BTRFS_ASYNC_DISCARD_MAX_SIZE (SZ_64M) +#define BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE (SZ_64M) /* Work operations */ void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl, diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 438043aab6fb..db90bea30a8d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -3428,6 +3428,8 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 end, u64 minlen, bool async) { + struct btrfs_discard_ctl *discard_ctl = + &block_group->fs_info->discard_ctl; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; struct rb_node *node; @@ -3436,6 +3438,7 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group, u64 extent_bytes; enum btrfs_trim_state extent_trim_state; u64 bytes; + const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); while (start < end) { struct btrfs_trim_range trim_entry; @@ -3475,11 +3478,11 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group, goto next; } unlink_free_space(ctl, entry); - if (bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE) { - bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE; - extent_bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE; - entry->offset += BTRFS_ASYNC_DISCARD_MAX_SIZE; - entry->bytes -= BTRFS_ASYNC_DISCARD_MAX_SIZE; + if (max_discard_size && bytes > max_discard_size) { + bytes = max_discard_size; + extent_bytes = max_discard_size; + entry->offset += max_discard_size; + entry->bytes -= max_discard_size; link_free_space(ctl, entry); } else { kmem_cache_free(btrfs_free_space_cachep, entry); @@ -3584,12 +3587,15 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 end, u64 minlen, bool async) { + struct btrfs_discard_ctl *discard_ctl = + &block_group->fs_info->discard_ctl; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; int ret = 0; int ret2; u64 bytes; u64 offset = offset_to_bitmap(ctl, start); + const u64 max_discard_size = 
READ_ONCE(discard_ctl->max_discard_size); while (offset < end) { bool next_bitmap = false; @@ -3659,8 +3665,8 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, goto next; } - if (async && bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE) - bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE; + if (async && max_discard_size && bytes > max_discard_size) + bytes = max_discard_size; bitmap_clear_bits(ctl, entry, start, bytes); if (entry->bytes == 0) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 0168bc12875e..eac0155bf8c7 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -426,11 +426,42 @@ static ssize_t btrfs_discard_kbps_limit_store(struct kobject *kobj, BTRFS_ATTR_RW(discard, kbps_limit, btrfs_discard_kbps_limit_show, btrfs_discard_kbps_limit_store); +static ssize_t btrfs_discard_max_discard_size_show(struct kobject *kobj, + struct kobj_attribute *a, + char *buf) +{ + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); + + return snprintf(buf, PAGE_SIZE, "%llu\n", + READ_ONCE(fs_info->discard_ctl.max_discard_size)); +} + +static ssize_t btrfs_discard_max_discard_size_store(struct kobject *kobj, + struct kobj_attribute *a, + const char *buf, size_t len) +{ + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); + struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl; + u64 max_discard_size; + int ret; + + ret = kstrtou64(buf, 10, &max_discard_size); + if (ret) + return -EINVAL; + + WRITE_ONCE(discard_ctl->max_discard_size, max_discard_size); + + return len; +} +BTRFS_ATTR_RW(discard, max_discard_size, btrfs_discard_max_discard_size_show, + btrfs_discard_max_discard_size_store); + static const struct attribute *discard_debug_attrs[] = { BTRFS_ATTR_PTR(discard, discardable_bytes), BTRFS_ATTR_PTR(discard, discardable_extents), BTRFS_ATTR_PTR(discard, iops_limit), BTRFS_ATTR_PTR(discard, kbps_limit), + BTRFS_ATTR_PTR(discard, max_discard_size), NULL, }; -- cgit v1.2.3-59-g8ed1b From 7fe6d45e4009d9502fef32ac6222862ac17f8674 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:39 -0500 Subject: btrfs: have multiple discard lists Non-block group destruction discarding currently only had a single list with no minimum discard length. This can lead to caravaning more meaningful discards behind a heavily fragmented block group. This adds support for multiple lists with minimum discard lengths to prevent the caravan effect. We promote block groups back up when we exceed the BTRFS_ASYNC_DISCARD_MAX_FILTER size, currently we support only 2 lists with filters of 1MB and 32KB respectively. Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 +- fs/btrfs/discard.c | 105 ++++++++++++++++++++++++++++++++++++++++---- fs/btrfs/discard.h | 5 +++ fs/btrfs/free-space-cache.c | 55 +++++++++++++++++------ fs/btrfs/free-space-cache.h | 2 +- 5 files changed, 145 insertions(+), 24 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index fa6c95fdf5c6..486b9d1532eb 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -456,7 +456,7 @@ struct btrfs_full_stripe_locks_tree { * afterwards represent monotonically decreasing discard filter sizes to * prioritize what should be discarded next. 
*/ -#define BTRFS_NR_DISCARD_LISTS 2 +#define BTRFS_NR_DISCARD_LISTS 3 #define BTRFS_DISCARD_INDEX_UNUSED 0 #define BTRFS_DISCARD_INDEX_START 1 diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 5fdd82c356a4..0299981d0c62 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -22,6 +22,13 @@ #define BTRFS_DISCARD_MAX_DELAY_MSEC (1000UL) #define BTRFS_DISCARD_MAX_IOPS (10U) +/* Montonically decreasing minimum length filters after index 0 */ +static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = { + 0, + BTRFS_ASYNC_DISCARD_MAX_FILTER, + BTRFS_ASYNC_DISCARD_MIN_FILTER +}; + static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl, struct btrfs_block_group *block_group) { @@ -139,16 +146,18 @@ static struct btrfs_block_group *find_next_block_group( * peek_discard_list - wrap find_next_block_group() * @discard_ctl: discard control * @discard_state: the discard_state of the block_group after state management + * @discard_index: the discard_index of the block_group after state management * * This wraps find_next_block_group() and sets the block_group to be in use. * discard_state's control flow is managed here. Variables related to - * discard_state are reset here as needed (eg. discard_cursor). @discard_state - * is remembered as it may change while we're discarding, but we want the - * discard to execute in the context determined here. + * discard_state are reset here as needed (eg discard_cursor). @discard_state + * and @discard_index are remembered as it may change while we're discarding, + * but we want the discard to execute in the context determined here. */ static struct btrfs_block_group *peek_discard_list( struct btrfs_discard_ctl *discard_ctl, - enum btrfs_discard_state *discard_state) + enum btrfs_discard_state *discard_state, + int *discard_index) { struct btrfs_block_group *block_group; const u64 now = ktime_get_ns(); @@ -169,6 +178,7 @@ again: } discard_ctl->block_group = block_group; *discard_state = block_group->discard_state; + *discard_index = block_group->discard_index; } else { block_group = NULL; } @@ -178,6 +188,64 @@ again: return block_group; } +/** + * btrfs_discard_check_filter - updates a block groups filters + * @block_group: block group of interest + * @bytes: recently freed region size after coalescing + * + * Async discard maintains multiple lists with progressively smaller filters + * to prioritize discarding based on size. Should a free space that matches + * a larger filter be returned to the free_space_cache, prioritize that discard + * by moving @block_group to the proper filter. + */ +void btrfs_discard_check_filter(struct btrfs_block_group *block_group, + u64 bytes) +{ + struct btrfs_discard_ctl *discard_ctl; + + if (!block_group || + !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) + return; + + discard_ctl = &block_group->fs_info->discard_ctl; + + if (block_group->discard_index > BTRFS_DISCARD_INDEX_START && + bytes >= discard_minlen[block_group->discard_index - 1]) { + int i; + + remove_from_discard_list(discard_ctl, block_group); + + for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS; + i++) { + if (bytes >= discard_minlen[i]) { + block_group->discard_index = i; + add_to_discard_list(discard_ctl, block_group); + break; + } + } + } +} + +/** + * btrfs_update_discard_index - moves a block group along the discard lists + * @discard_ctl: discard control + * @block_group: block_group of interest + * + * Increment @block_group's discard_index. If it falls of the list, let it be. 
+ * Otherwise add it back to the appropriate list. + */ +static void btrfs_update_discard_index(struct btrfs_discard_ctl *discard_ctl, + struct btrfs_block_group *block_group) +{ + block_group->discard_index++; + if (block_group->discard_index == BTRFS_NR_DISCARD_LISTS) { + block_group->discard_index = 1; + return; + } + + add_to_discard_list(discard_ctl, block_group); +} + /** * btrfs_discard_cancel_work - remove a block_group from the discard lists * @discard_ctl: discard control @@ -295,6 +363,8 @@ static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl, btrfs_mark_bg_unused(block_group); else add_to_discard_unused_list(discard_ctl, block_group); + } else { + btrfs_update_discard_index(discard_ctl, block_group); } } @@ -311,25 +381,42 @@ static void btrfs_discard_workfn(struct work_struct *work) struct btrfs_discard_ctl *discard_ctl; struct btrfs_block_group *block_group; enum btrfs_discard_state discard_state; + int discard_index = 0; u64 trimmed = 0; + u64 minlen = 0; discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work); - block_group = peek_discard_list(discard_ctl, &discard_state); + block_group = peek_discard_list(discard_ctl, &discard_state, + &discard_index); if (!block_group || !btrfs_run_discard_work(discard_ctl)) return; /* Perform discarding */ - if (discard_state == BTRFS_DISCARD_BITMAPS) + minlen = discard_minlen[discard_index]; + + if (discard_state == BTRFS_DISCARD_BITMAPS) { + u64 maxlen = 0; + + /* + * Use the previous levels minimum discard length as the max + * length filter. In the case something is added to make a + * region go beyond the max filter, the entire bitmap is set + * back to BTRFS_TRIM_STATE_UNTRIMMED. + */ + if (discard_index != BTRFS_DISCARD_INDEX_UNUSED) + maxlen = discard_minlen[discard_index - 1]; + btrfs_trim_block_group_bitmaps(block_group, &trimmed, block_group->discard_cursor, btrfs_block_group_end(block_group), - 0, true); - else + minlen, maxlen, true); + } else { btrfs_trim_block_group_extents(block_group, &trimmed, block_group->discard_cursor, btrfs_block_group_end(block_group), - 0, true); + minlen, true); + } discard_ctl->prev_discard = trimmed; diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h index f9445ee5dcc3..21a15776dac4 100644 --- a/fs/btrfs/discard.h +++ b/fs/btrfs/discard.h @@ -11,6 +11,11 @@ struct btrfs_block_group; /* Discard size limits */ #define BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE (SZ_64M) +#define BTRFS_ASYNC_DISCARD_MAX_FILTER (SZ_1M) +#define BTRFS_ASYNC_DISCARD_MIN_FILTER (SZ_32K) + +/* List operations */ +void btrfs_discard_check_filter(struct btrfs_block_group *block_group, u64 bytes); /* Work operations */ void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl, diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index db90bea30a8d..a4340657b7f1 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2465,6 +2465,7 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, struct btrfs_block_group *block_group = ctl->private; struct btrfs_free_space *info; int ret = 0; + u64 filter_bytes = bytes; info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); if (!info) @@ -2501,6 +2502,8 @@ link: */ steal_from_bitmap(ctl, info, true); + filter_bytes = max(filter_bytes, info->bytes); + ret = link_free_space(ctl, info); if (ret) kmem_cache_free(btrfs_free_space_cachep, info); @@ -2513,8 +2516,10 @@ out: ASSERT(ret != -EEXIST); } - if (trim_state != BTRFS_TRIM_STATE_TRIMMED) + if (trim_state != BTRFS_TRIM_STATE_TRIMMED) { + 
btrfs_discard_check_filter(block_group, filter_bytes); btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); + } return ret; } @@ -3478,7 +3483,14 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group, goto next; } unlink_free_space(ctl, entry); - if (max_discard_size && bytes > max_discard_size) { + /* + * Let bytes = BTRFS_MAX_DISCARD_SIZE + X. + * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim + * X when we come back around. So trim it now. + */ + if (max_discard_size && + bytes >= (max_discard_size + + BTRFS_ASYNC_DISCARD_MIN_FILTER)) { bytes = max_discard_size; extent_bytes = max_discard_size; entry->offset += max_discard_size; @@ -3585,7 +3597,7 @@ static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl, */ static int trim_bitmaps(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 end, u64 minlen, - bool async) + u64 maxlen, bool async) { struct btrfs_discard_ctl *discard_ctl = &block_group->fs_info->discard_ctl; @@ -3613,7 +3625,15 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, } entry = tree_search_offset(ctl, offset, 1, 0); - if (!entry || (async && start == offset && + /* + * Bitmaps are marked trimmed lossily now to prevent constant + * discarding of the same bitmap (the reason why we are bound + * by the filters). So, retrim the block group bitmaps when we + * are preparing to punt to the unused_bgs list. This uses + * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED + * which is the only discard index which sets minlen to 0. + */ + if (!entry || (async && minlen && start == offset && btrfs_free_space_trimmed(entry))) { spin_unlock(&ctl->tree_lock); mutex_unlock(&ctl->cache_writeout_mutex); @@ -3634,10 +3654,10 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, ret2 = search_bitmap(ctl, entry, &start, &bytes, false); if (ret2 || start >= end) { /* - * This keeps the invariant that all bytes are trimmed - * if BTRFS_TRIM_STATE_TRIMMED is set on a bitmap. + * We lossily consider a bitmap trimmed if we only skip + * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER. */ - if (ret2 && !minlen) + if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER) end_trimming_bitmap(ctl, entry); else entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; @@ -3658,14 +3678,21 @@ static int trim_bitmaps(struct btrfs_block_group *block_group, } bytes = min(bytes, end - start); - if (bytes < minlen) { - entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; + if (bytes < minlen || (async && maxlen && bytes > maxlen)) { spin_unlock(&ctl->tree_lock); mutex_unlock(&ctl->cache_writeout_mutex); goto next; } - if (async && max_discard_size && bytes > max_discard_size) + /* + * Let bytes = BTRFS_MAX_DISCARD_SIZE + X. + * If X < @minlen, we won't trim X when we come back around. + * So trim it now. We differ here from trimming extents as we + * don't keep individual state per bit. 
+ */ + if (async && + max_discard_size && + bytes > (max_discard_size + minlen)) bytes = max_discard_size; bitmap_clear_bits(ctl, entry, start, bytes); @@ -3773,7 +3800,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group, if (ret) goto out; - ret = trim_bitmaps(block_group, trimmed, start, end, minlen, false); + ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false); div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem); /* If we ended in the middle of a bitmap, reset the trimming flag */ if (rem) @@ -3807,7 +3834,7 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group, int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen, - bool async) + u64 maxlen, bool async) { int ret; @@ -3821,7 +3848,9 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group, btrfs_get_block_group_trimming(block_group); spin_unlock(&block_group->lock); - ret = trim_bitmaps(block_group, trimmed, start, end, minlen, async); + ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen, + async); + btrfs_put_block_group_trimming(block_group); return ret; diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 5018190a49a3..2e0a8077aa74 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -146,7 +146,7 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group, bool async); int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen, - bool async); + u64 maxlen, bool async); /* Support functions for running our sanity tests */ #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS -- cgit v1.2.3-59-g8ed1b From 5cb0724e1b4653629c508906ca6098bae77d2f95 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:40 -0500 Subject: btrfs: only keep track of data extents for async discard As mentioned earlier, discarding data can be done either by issuing an explicit discard or implicitly by reusing the LBA. Metadata block_groups see much more frequent reuse due to well it being metadata. So instead of explicitly discarding metadata block_groups, just leave them be and let the latter implicit discarding be done for them. For mixed block_groups, block_groups which contain both metadata and data, we let them be as higher fragmentation is expected. Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.h | 11 +++++++++++ fs/btrfs/discard.c | 12 ++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index a8d2edcd8760..5cf1ea33f284 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -182,6 +182,17 @@ static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group) return (block_group->start + block_group->length); } +static inline bool btrfs_is_block_group_data_only( + struct btrfs_block_group *block_group) +{ + /* + * In mixed mode the fragmentation is expected to be high, lowering the + * efficiency, so only proper data block groups are considered. 
+ */ + return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) && + !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA); +} + #ifdef CONFIG_BTRFS_DEBUG static inline int btrfs_should_fragment_free_space( struct btrfs_block_group *block_group) diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 0299981d0c62..5af42e0317b7 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -57,6 +57,9 @@ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, struct btrfs_block_group *block_group) { + if (!btrfs_is_block_group_data_only(block_group)) + return; + spin_lock(&discard_ctl->lock); __add_to_discard_list(discard_ctl, block_group); spin_unlock(&discard_ctl->lock); @@ -169,7 +172,10 @@ again: if (block_group && now > block_group->discard_eligible_time) { if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED && block_group->used != 0) { - __add_to_discard_list(discard_ctl, block_group); + if (btrfs_is_block_group_data_only(block_group)) + __add_to_discard_list(discard_ctl, block_group); + else + list_del_init(&block_group->discard_list); goto again; } if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) { @@ -507,7 +513,9 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group, s32 extents_delta; s64 bytes_delta; - if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) + if (!block_group || + !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC) || + !btrfs_is_block_group_data_only(block_group)) return; discard_ctl = &block_group->fs_info->discard_ctl; -- cgit v1.2.3-59-g8ed1b From 9ddf648f9c2a492cef4e41e31c50515a817d0562 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:41 -0500 Subject: btrfs: keep track of discard reuse stats Keep track of how much we are discarding and how often we are reusing with async discard. The discard_*_bytes values don't need any special protection because the work item provides the single threaded access. 
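To make the single-writer claim above concrete, here is a minimal userspace sketch (assumed names, not kernel code): the two byte counters written only by the lone discard worker stay plain integers, while the counter bumped from concurrent allocation paths uses an atomic add.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdatomic.h>
    #include <pthread.h>

    /* Only the discard worker writes these, so plain u64s are enough. */
    static uint64_t discard_extent_bytes;
    static uint64_t discard_bitmap_bytes;

    /* Allocation paths update this from many threads, so it is atomic. */
    static _Atomic uint64_t discard_bytes_saved;

    static void *discard_worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 1000; i++) {
            discard_extent_bytes += 4096;   /* single writer: no race */
            discard_bitmap_bytes += 512;
        }
        return NULL;
    }

    static void *allocator(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 1000; i++)
            atomic_fetch_add_explicit(&discard_bytes_saved, 4096,
                                      memory_order_relaxed);
        return NULL;
    }

    int main(void)
    {
        pthread_t w, a1, a2;

        pthread_create(&w, NULL, discard_worker, NULL);
        pthread_create(&a1, NULL, allocator, NULL);
        pthread_create(&a2, NULL, allocator, NULL);
        pthread_join(w, NULL);
        pthread_join(a1, NULL);
        pthread_join(a2, NULL);

        printf("extent bytes: %llu\n",
               (unsigned long long)discard_extent_bytes);
        printf("bitmap bytes: %llu\n",
               (unsigned long long)discard_bitmap_bytes);
        printf("bytes saved:  %llu\n",
               (unsigned long long)atomic_load(&discard_bytes_saved));
        return 0;
    }

The same reasoning explains the split in the patch: the per-pass byte totals are only ever touched from the work item, while the bytes-saved counter is shared with allocation paths and therefore kept as an atomic64_t.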
Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 3 +++ fs/btrfs/discard.c | 5 +++++ fs/btrfs/free-space-cache.c | 14 ++++++++++++++ fs/btrfs/sysfs.c | 36 ++++++++++++++++++++++++++++++++++++ 4 files changed, 58 insertions(+) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 486b9d1532eb..569931dd0ce5 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -473,6 +473,9 @@ struct btrfs_discard_ctl { unsigned long delay; u32 iops_limit; u32 kbps_limit; + u64 discard_extent_bytes; + u64 discard_bitmap_bytes; + atomic64_t discard_bytes_saved; }; /* delayed seq elem */ diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 5af42e0317b7..40dcb5dcdc95 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -417,11 +417,13 @@ static void btrfs_discard_workfn(struct work_struct *work) block_group->discard_cursor, btrfs_block_group_end(block_group), minlen, maxlen, true); + discard_ctl->discard_bitmap_bytes += trimmed; } else { btrfs_trim_block_group_extents(block_group, &trimmed, block_group->discard_cursor, btrfs_block_group_end(block_group), minlen, true); + discard_ctl->discard_extent_bytes += trimmed; } discard_ctl->prev_discard = trimmed; @@ -626,6 +628,9 @@ void btrfs_discard_init(struct btrfs_fs_info *fs_info) discard_ctl->delay = BTRFS_DISCARD_MAX_DELAY_MSEC; discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS; discard_ctl->kbps_limit = 0; + discard_ctl->discard_extent_bytes = 0; + discard_ctl->discard_bitmap_bytes = 0; + atomic64_set(&discard_ctl->discard_bytes_saved, 0); } void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index a4340657b7f1..802e06d6303d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2842,6 +2842,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, u64 *max_extent_size) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct btrfs_discard_ctl *discard_ctl = + &block_group->fs_info->discard_ctl; struct btrfs_free_space *entry = NULL; u64 bytes_search = bytes + empty_size; u64 ret = 0; @@ -2858,6 +2860,10 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, ret = offset; if (entry->bitmap) { bitmap_clear_bits(ctl, entry, offset, bytes); + + if (!btrfs_free_space_trimmed(entry)) + atomic64_add(bytes, &discard_ctl->discard_bytes_saved); + if (!entry->bytes) free_bitmap(ctl, entry); } else { @@ -2866,6 +2872,9 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, align_gap = entry->offset; align_gap_trim_state = entry->trim_state; + if (!btrfs_free_space_trimmed(entry)) + atomic64_add(bytes, &discard_ctl->discard_bytes_saved); + entry->offset = offset + bytes; WARN_ON(entry->bytes < bytes + align_gap_len); @@ -2969,6 +2978,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group, u64 min_start, u64 *max_extent_size) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct btrfs_discard_ctl *discard_ctl = + &block_group->fs_info->discard_ctl; struct btrfs_free_space *entry = NULL; struct rb_node *node; u64 ret = 0; @@ -3033,6 +3044,9 @@ out: spin_lock(&ctl->tree_lock); + if (!btrfs_free_space_trimmed(entry)) + atomic64_add(bytes, &discard_ctl->discard_bytes_saved); + ctl->free_space -= bytes; if (!entry->bitmap && !btrfs_free_space_trimmed(entry)) ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 
eac0155bf8c7..55e4ed1af29c 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -366,6 +366,39 @@ static ssize_t btrfs_discardable_extents_show(struct kobject *kobj, } BTRFS_ATTR(discard, discardable_extents, btrfs_discardable_extents_show); +static ssize_t btrfs_discard_bitmap_bytes_show(struct kobject *kobj, + struct kobj_attribute *a, + char *buf) +{ + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); + + return snprintf(buf, PAGE_SIZE, "%lld\n", + fs_info->discard_ctl.discard_bitmap_bytes); +} +BTRFS_ATTR(discard, discard_bitmap_bytes, btrfs_discard_bitmap_bytes_show); + +static ssize_t btrfs_discard_bytes_saved_show(struct kobject *kobj, + struct kobj_attribute *a, + char *buf) +{ + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); + + return snprintf(buf, PAGE_SIZE, "%lld\n", + atomic64_read(&fs_info->discard_ctl.discard_bytes_saved)); +} +BTRFS_ATTR(discard, discard_bytes_saved, btrfs_discard_bytes_saved_show); + +static ssize_t btrfs_discard_extent_bytes_show(struct kobject *kobj, + struct kobj_attribute *a, + char *buf) +{ + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); + + return snprintf(buf, PAGE_SIZE, "%lld\n", + fs_info->discard_ctl.discard_extent_bytes); +} +BTRFS_ATTR(discard, discard_extent_bytes, btrfs_discard_extent_bytes_show); + static ssize_t btrfs_discard_iops_limit_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) @@ -459,6 +492,9 @@ BTRFS_ATTR_RW(discard, max_discard_size, btrfs_discard_max_discard_size_show, static const struct attribute *discard_debug_attrs[] = { BTRFS_ATTR_PTR(discard, discardable_bytes), BTRFS_ATTR_PTR(discard, discardable_extents), + BTRFS_ATTR_PTR(discard, discard_bitmap_bytes), + BTRFS_ATTR_PTR(discard, discard_bytes_saved), + BTRFS_ATTR_PTR(discard, discard_extent_bytes), BTRFS_ATTR_PTR(discard, iops_limit), BTRFS_ATTR_PTR(discard, kbps_limit), BTRFS_ATTR_PTR(discard, max_discard_size), -- cgit v1.2.3-59-g8ed1b From dbc2a8c92756507e8183a4c23a02fa2a994eb640 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:42 -0500 Subject: btrfs: add async discard implementation overview Give a brief overview for how async discard is implemented. Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/discard.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 40dcb5dcdc95..6f48ae1589d9 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -12,6 +12,45 @@ #include "discard.h" #include "free-space-cache.h" +/* + * This contains the logic to handle async discard. + * + * Async discard manages trimming of free space outside of transaction commit. + * Discarding is done by managing the block_groups on a LRU list based on free + * space recency. Two passes are used to first prioritize discarding extents + * and then allow for trimming in the bitmap the best opportunity to coalesce. + * The block_groups are maintained on multiple lists to allow for multiple + * passes with different discard filter requirements. A delayed work item is + * used to manage discarding with timeout determined by a max of the delay + * incurred by the iops rate limit, the byte rate limit, and the max delay of + * BTRFS_DISCARD_MAX_DELAY. + * + * Note, this only keeps track of block_groups that are explicitly for data. + * Mixed block_groups are not supported. + * + * The first list is special to manage discarding of fully free block groups. 
+ * This is necessary because we issue a final trim for a full free block group + * after forgetting it. When a block group becomes unused, instead of directly + * being added to the unused_bgs list, we add it to this first list. Then + * from there, if it becomes fully discarded, we place it onto the unused_bgs + * list. + * + * The in-memory free space cache serves as the backing state for discard. + * Consequently this means there is no persistence. We opt to load all the + * block groups in as not discarded, so the mount case degenerates to the + * crashing case. + * + * As the free space cache uses bitmaps, there exists a tradeoff between + * ease/efficiency for find_free_extent() and the accuracy of discard state. + * Here we opt to let untrimmed regions merge with everything while only letting + * trimmed regions merge with other trimmed regions. This can cause + * overtrimming, but the coalescing benefit seems to be worth it. Additionally, + * bitmap state is tracked as a whole. If we're able to fully trim a bitmap, + * the trimmed flag is set on the bitmap. Otherwise, if an allocation comes in, + * this resets the state and we will retry trimming the whole bitmap. This is a + * tradeoff between discard state accuracy and the cost of accounting. + */ + /* This is an initial delay to give some chance for block reuse */ #define BTRFS_DISCARD_DELAY (120ULL * NSEC_PER_SEC) #define BTRFS_DISCARD_UNUSED_DELAY (10ULL * NSEC_PER_SEC) -- cgit v1.2.3-59-g8ed1b From 5d90c5c75711d9734e9d3d38a6e3b849b7bea742 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:43 -0500 Subject: btrfs: increase the metadata allowance for the free_space_cache Currently, there is no way for the free space cache to recover from being serviced by purely bitmaps because the extent threshold is set to 0 in recalculate_thresholds() when we surpass the metadata allowance. This adds a recovery mechanism by keeping large extents out of the bitmaps and increases the metadata upper bound to 64KB. The recovery mechanism bypasses this upper bound, thus making it a soft upper bound. But, with the bypass being 1MB or greater, it shouldn't add unbounded overhead. Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/free-space-cache.c | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 802e06d6303d..f75eb737e713 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -24,7 +24,8 @@ #include "discard.h" #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) -#define MAX_CACHE_BYTES_PER_GIG SZ_32K +#define MAX_CACHE_BYTES_PER_GIG SZ_64K +#define FORCE_EXTENT_THRESHOLD SZ_1M struct btrfs_trim_range { u64 start; @@ -1694,26 +1695,17 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) ASSERT(ctl->total_bitmaps <= max_bitmaps); /* - * The goal is to keep the total amount of memory used per 1gb of space - * at or below 32k, so we need to adjust how much memory we allow to be - * used by extent based free space tracking + * We are trying to keep the total amount of memory used per 1GiB of + * space to be MAX_CACHE_BYTES_PER_GIG. However, with a reclamation + * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of + * bitmaps, we may end up using more memory than this. 
*/ if (size < SZ_1G) max_bytes = MAX_CACHE_BYTES_PER_GIG; else max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G); - /* - * we want to account for 1 more bitmap than what we have so we can make - * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as - * we add more bitmaps. - */ - bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit; - - if (bitmap_bytes >= max_bytes) { - ctl->extents_thresh = 0; - return; - } + bitmap_bytes = ctl->total_bitmaps * ctl->unit; /* * we want the extent entry threshold to always be at most 1/2 the max @@ -2099,6 +2091,10 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl, forced = true; #endif + /* This is a way to reclaim large regions from the bitmaps. */ + if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD) + return false; + /* * If we are below the extents threshold then we can add this as an * extent, and don't have to deal with the bitmap -- cgit v1.2.3-59-g8ed1b From f9bb615af2ba8724bff82376275a14b02eef5eb2 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:44 -0500 Subject: btrfs: make smaller extents more likely to go into bitmaps It's less than ideal for small extents to eat into our extent budget, so force extents <= 32KB into the bitmaps save for the first handful. Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Signed-off-by: David Sterba --- fs/btrfs/free-space-cache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f75eb737e713..30e010852040 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2107,8 +2107,8 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl, * of cache left then go ahead an dadd them, no sense in adding * the overhead of a bitmap if we don't have to. */ - if (info->bytes <= fs_info->sectorsize * 4) { - if (ctl->free_extents * 2 <= ctl->extents_thresh) + if (info->bytes <= fs_info->sectorsize * 8) { + if (ctl->free_extents * 3 <= ctl->extents_thresh) return false; } else { return false; -- cgit v1.2.3-59-g8ed1b From 27f0afc7376bf6e1405ee3a0ef0ab85171059ae8 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:45 -0500 Subject: btrfs: ensure removal of discardable_* in free_bitmap() Most callers of free_bitmap() only call it if bitmap_info->bytes is 0. However, there are certain cases where we may free the free space cache via __btrfs_remove_free_space_cache(). This exposes a path where free_bitmap() is called regardless. This may result in a bad accounting situation for discardable_bytes and discardable_extents. So, remove the stats and call btrfs_discard_update_discardable(). Signed-off-by: Dennis Zhou Signed-off-by: David Sterba --- fs/btrfs/free-space-cache.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 30e010852040..0598fd3c6e3f 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1959,6 +1959,18 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, static void free_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info) { + /* + * Normally when this is called, the bitmap is completely empty. However, + * if we are blowing up the free space cache for one reason or another + * via __btrfs_remove_free_space_cache(), then it may not be freed and + * we may leave stats on the table. 
+ */ + if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) { + ctl->discardable_extents[BTRFS_STAT_CURR] -= + bitmap_info->bitmap_extents; + ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes; + + } unlink_free_space(ctl, bitmap_info); kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap); kmem_cache_free(btrfs_free_space_cachep, bitmap_info); @@ -2776,6 +2788,8 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) { spin_lock(&ctl->tree_lock); __btrfs_remove_free_space_cache_locked(ctl); + if (ctl->private) + btrfs_discard_update_discardable(ctl->private, ctl); spin_unlock(&ctl->tree_lock); } -- cgit v1.2.3-59-g8ed1b From 81b29a3bf7cce4373526ff91a7a89aa6505597f7 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 2 Jan 2020 16:26:46 -0500 Subject: btrfs: add correction to handle -1 edge case in async discard From Dave's testing described below, it's possible to drive a file system to have bogus values of discardable_extents and _bytes. As btrfs_discard_calc_delay() is the only user of discardable_extents, we can correct here for any negative discardable_extents/discardable_bytes. The problem is not reliably reproducible. The workload that created it was based on linux git tree, switching between release tags, then everytihng deleted followed by a full rebalance. At this state the values of discardable_bytes was 16K and discardable_extents was -1, expected values 0 and 0. Repeating the workload again did not correct the bogus values so the offset seems to be stable once it happens. Reported-by: David Sterba Signed-off-by: Dennis Zhou Signed-off-by: David Sterba --- fs/btrfs/discard.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 6f48ae1589d9..5615320fa659 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -516,6 +516,7 @@ bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl) void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl) { s32 discardable_extents; + s64 discardable_bytes; u32 iops_limit; unsigned long delay; unsigned long lower_limit = BTRFS_DISCARD_MIN_DELAY_MSEC; @@ -526,6 +527,27 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl) spin_lock(&discard_ctl->lock); + /* + * The following is to fix a potential -1 discrepenancy that we're not + * sure how to reproduce. But given that this is the only place that + * utilizes these numbers and this is only called by from + * btrfs_finish_extent_commit() which is synchronized, we can correct + * here. + */ + if (discardable_extents < 0) + atomic_add(-discardable_extents, + &discard_ctl->discardable_extents); + + discardable_bytes = atomic64_read(&discard_ctl->discardable_bytes); + if (discardable_bytes < 0) + atomic64_add(-discardable_bytes, + &discard_ctl->discardable_bytes); + + if (discardable_extents <= 0) { + spin_unlock(&discard_ctl->lock); + return; + } + iops_limit = READ_ONCE(discard_ctl->iops_limit); if (iops_limit) lower_limit = max_t(unsigned long, lower_limit, -- cgit v1.2.3-59-g8ed1b From ef0a82da81d689860a36cf058497dad2a5ae32c5 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Thu, 2 Jan 2020 17:14:57 +0100 Subject: btrfs: remove unnecessary wrapper get_alloc_profile btrfs_get_alloc_profile() is a simple wrapper over get_alloc_profile(). The only difference is btrfs_get_alloc_profile() is visible to other functions in btrfs while get_alloc_profile() is static and thus only visible to functions in block-group.c. 
Let's just fold get_alloc_profile() into btrfs_get_alloc_profile() to get rid of the unnecessary second function. Reviewed-by: Josef Bacik Reviewed-by: Anand Jain Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index ba71a84fd1ee..6480976ddc78 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -96,7 +96,7 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags) return extended_to_chunk(flags | allowed); } -static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) +u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) { unsigned seq; u64 flags; @@ -116,11 +116,6 @@ static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) return btrfs_reduce_alloc_profile(fs_info, flags); } -u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) -{ - return get_alloc_profile(fs_info, orig_flags); -} - void btrfs_get_block_group(struct btrfs_block_group *cache) { atomic_inc(&cache->count); -- cgit v1.2.3-59-g8ed1b From 94f8c46566485974c155c5cb4507f03873cf4d71 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 6 Jan 2020 17:14:23 +0100 Subject: btrfs: remove unused member btrfs_device::work This is a leftover from recently removed bio scheduling framework. Fixes: ba8a9d079543 ("Btrfs: delete the entire async bio submission framework") Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/volumes.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 3c56ef571b00..81f21e42b887 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -120,8 +120,6 @@ struct btrfs_device { /* per-device scrub information */ struct scrub_ctx *scrub_ctx; - struct btrfs_work work; - /* readahead state */ atomic_t reada_in_flight; u64 reada_next; -- cgit v1.2.3-59-g8ed1b From 4babad10198fa73fe73239d02c2e99e3333f5f5c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 8 Jan 2020 15:29:53 +0100 Subject: btrfs: safely advance counter when looking up bio csums Dan's smatch tool reports fs/btrfs/file-item.c:295 btrfs_lookup_bio_sums() warn: should this be 'count == -1' which points to the while (count--) loop. With count == 0 the check itself could decrement it to -1. There's a WARN_ON a few lines below that has never been seen in practice though. It turns out that the value of page_bytes_left matches the count (by sectorsize multiples). The loop never reaches the state where count would go to -1, because page_bytes_left == 0 is found first and this breaks out. For clarity, use only plain check on count (and only for positive value), decrement safely inside the loop. Any other discrepancy after the whole bio list processing should be reported by the exising WARN_ON_ONCE as well. 
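The pitfall being removed is easy to demonstrate outside the kernel; the snippet below is purely illustrative. A post-decrement in the loop condition executes even when the body never runs, so entering with count == 0 leaves it at -1, exactly the state the static checker flagged; checking count > 0 first and decrementing inside the body can never go negative.

    #include <stdio.h>

    int main(void)
    {
        int count;

        /* Old pattern: the condition decrements even if the body never runs. */
        count = 0;
        while (count--) {
            /* never reached */
        }
        printf("post-decrement in condition: count ends at %d\n", count); /* -1 */

        /* Fixed pattern: only decrement once we know there is work left. */
        count = 0;
        while (count > 0) {
            count--;
            /* process one sector ... */
        }
        printf("check-then-decrement:        count ends at %d\n", count); /*  0 */

        return 0;
    }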
Reported-by: Dan Carpenter Reviewed-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/file-item.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index bb374042d297..c2f365662d55 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -283,7 +283,8 @@ found: csum += count * csum_size; nblocks -= count; next: - while (count--) { + while (count > 0) { + count--; disk_bytenr += fs_info->sectorsize; offset += fs_info->sectorsize; page_bytes_left -= fs_info->sectorsize; -- cgit v1.2.3-59-g8ed1b From 556755a8a99be8ca3cd9fbe36aaf9b3b0339a00d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 3 Jan 2020 10:38:44 -0500 Subject: btrfs: fix improper setting of scanned for range cyclic write cache pages We noticed that we were having regular CG OOM kills in cases where there was still enough dirty pages to avoid OOM'ing. It turned out there's this corner case in btrfs's handling of range_cyclic where files that were being redirtied were not getting fully written out because of how we do range_cyclic writeback. We unconditionally were setting scanned = 1; the first time we found any pages in the inode. This isn't actually what we want, we want it to be set if we've scanned the entire file. For range_cyclic we could be starting in the middle or towards the end of the file, so we could write one page and then not write any of the other dirty pages in the file because we set scanned = 1. Fix this by not setting scanned = 1 if we find pages. The rules for setting scanned should be 1) !range_cyclic. In this case we have a specified range to write out. 2) range_cyclic && index == 0. In this case we've started at the beginning and there is no need to loop around a second time. 3) range_cyclic && we started at index > 0 and we've reached the end of the file without satisfying our nr_to_write. This patch fixes both of our writepages implementations to make sure these rules hold true. This fixed our over zealous CG OOMs in production. Fixes: d1310b2e0cd9 ("Btrfs: Split the extent_map code into two parts") Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ add comment ] Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 394beb474a69..e2d30287e2d5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3919,6 +3919,11 @@ int btree_write_cache_pages(struct address_space *mapping, if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; + /* + * Start from the beginning does not need to cycle over the + * range, mark it as scanned. + */ + scanned = (index == 0); } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; @@ -3936,7 +3941,6 @@ retry: tag))) { unsigned i; - scanned = 1; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; @@ -4065,6 +4069,11 @@ static int extent_write_cache_pages(struct address_space *mapping, if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; + /* + * Start from the beginning does not need to cycle over the + * range, mark it as scanned. 
+ */ + scanned = (index == 0); } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; @@ -4098,7 +4107,6 @@ retry: &index, end, tag))) { unsigned i; - scanned = 1; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; -- cgit v1.2.3-59-g8ed1b From a69976bc69308aa475d0ba3b8b3efd1d013c0460 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Fri, 10 Jan 2020 12:26:34 +0800 Subject: btrfs: device stats, log when stats are zeroed We had a report indicating that some read errors aren't reported by the device stats in the userland. It is important to have the errors reported in the device stat as user land scripts might depend on it to take the reasonable corrective actions. But to debug these issue we need to be really sure that request to reset the device stat did not come from the userland itself. So log an info message when device error reset happens. For example: BTRFS info (device sdc): device stats zeroed by btrfs(9223) Reported-by: philip@philip-seeger.de Link: https://www.spinics.net/lists/linux-btrfs/msg96528.html Reviewed-by: Josef Bacik Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d184a994c392..9658c53eec7b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7329,6 +7329,8 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, else btrfs_dev_stat_set(dev, i, 0); } + btrfs_info(fs_info, "device stats zeroed by %s (%d)", + current->comm, task_pid_nr(current)); } else { for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) if (stats->nr_items > i) -- cgit v1.2.3-59-g8ed1b From 68c467cbb2f389b6c933e235bce0d1756fc8cc34 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 16 Dec 2019 20:00:48 +0100 Subject: btrfs: separate definition of assertion failure handlers There's a report where objtool detects unreachable instructions, eg.: fs/btrfs/ctree.o: warning: objtool: btrfs_search_slot()+0x2d4: unreachable instruction This seems to be a false positive due to compiler version. The cause is in the ASSERT macro implementation that does the conditional check as IS_DEFINED(CONFIG_BTRFS_ASSERT) and not an #ifdef. To avoid that, use the ifdefs directly. There are still 2 reports that aren't fixed: fs/btrfs/extent_io.o: warning: objtool: __set_extent_bit()+0x71f: unreachable instruction fs/btrfs/relocation.o: warning: objtool: find_data_references()+0x4e0: unreachable instruction Co-developed-by: Josh Poimboeuf Signed-off-by: Josh Poimboeuf Reported-by: Randy Dunlap Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 569931dd0ce5..f90b82050d2d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3157,17 +3157,21 @@ do { \ rcu_read_unlock(); \ } while (0) -__cold -static inline void assfail(const char *expr, const char *file, int line) +#ifdef CONFIG_BTRFS_ASSERT +__cold __noreturn +static inline void assertfail(const char *expr, const char *file, int line) { - if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) { - pr_err("assertion failed: %s, in %s:%d\n", expr, file, line); - BUG(); - } + pr_err("assertion failed: %s, in %s:%d\n", expr, file, line); + BUG(); } -#define ASSERT(expr) \ - (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) +#define ASSERT(expr) \ + (likely(expr) ? 
(void)0 : assertfail(#expr, __FILE__, __LINE__)) + +#else +static inline void assertfail(const char *expr, const char* file, int line) { } +#define ASSERT(expr) (void)(expr) +#endif /* * Use that for functions that are conditionally exported for sanity tests but -- cgit v1.2.3-59-g8ed1b From 96a14336bd8fc13cc3db036a5745acdf3bc185f4 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 10 Dec 2019 19:57:51 +0200 Subject: btrfs: Move and unexport btrfs_rmap_block It's used only during initial block group reading to map physical address of super block to a list of logical ones. Make it private to block-group.c, add proper kernel doc and ensure it's exported only for tests. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 5 +++ fs/btrfs/volumes.c | 69 ---------------------------------------- fs/btrfs/volumes.h | 2 -- 4 files changed, 91 insertions(+), 71 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 6480976ddc78..00cb40210b53 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -15,6 +15,7 @@ #include "tree-log.h" #include "delalloc-space.h" #include "discard.h" +#include "raid56.h" /* * Return target flags in extended format or 0 if restripe for this chunk_type @@ -1561,6 +1562,91 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) write_sequnlock(&fs_info->profiles_lock); } +/** + * btrfs_rmap_block - Map a physical disk address to a list of logical addresses + * @chunk_start: logical address of block group + * @physical: physical address to map to logical addresses + * @logical: return array of logical addresses which map to @physical + * @naddrs: length of @logical + * @stripe_len: size of IO stripe for the given block group + * + * Maps a particular @physical disk address to a list of @logical addresses. + * Used primarily to exclude those portions of a block group that contain super + * block copies. 
+ */ +EXPORT_FOR_TESTS +int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, + u64 physical, u64 **logical, int *naddrs, int *stripe_len) +{ + struct extent_map *em; + struct map_lookup *map; + u64 *buf; + u64 bytenr; + u64 length; + u64 stripe_nr; + u64 rmap_len; + int i, j, nr = 0; + + em = btrfs_get_chunk_map(fs_info, chunk_start, 1); + if (IS_ERR(em)) + return -EIO; + + map = em->map_lookup; + length = em->len; + rmap_len = map->stripe_len; + + if (map->type & BTRFS_BLOCK_GROUP_RAID10) + length = div_u64(length, map->num_stripes / map->sub_stripes); + else if (map->type & BTRFS_BLOCK_GROUP_RAID0) + length = div_u64(length, map->num_stripes); + else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { + length = div_u64(length, nr_data_stripes(map)); + rmap_len = map->stripe_len * nr_data_stripes(map); + } + + buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); + BUG_ON(!buf); /* -ENOMEM */ + + for (i = 0; i < map->num_stripes; i++) { + if (map->stripes[i].physical > physical || + map->stripes[i].physical + length <= physical) + continue; + + stripe_nr = physical - map->stripes[i].physical; + stripe_nr = div64_u64(stripe_nr, map->stripe_len); + + if (map->type & BTRFS_BLOCK_GROUP_RAID10) { + stripe_nr = stripe_nr * map->num_stripes + i; + stripe_nr = div_u64(stripe_nr, map->sub_stripes); + } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { + stripe_nr = stripe_nr * map->num_stripes + i; + } + /* + * The remaining case would be for RAID56, multiply by + * nr_data_stripes(). Alternatively, just use rmap_len below + * instead of map->stripe_len + */ + + bytenr = chunk_start + stripe_nr * rmap_len; + WARN_ON(nr >= map->num_stripes); + for (j = 0; j < nr; j++) { + if (buf[j] == bytenr) + break; + } + if (j == nr) { + WARN_ON(nr >= map->num_stripes); + buf[nr++] = bytenr; + } + } + + *logical = buf; + *naddrs = nr; + *stripe_len = rmap_len; + + free_extent_map(em); + return 0; +} + static int exclude_super_stripes(struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 5cf1ea33f284..107bb557ca8d 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -283,4 +283,9 @@ static inline int btrfs_block_group_done(struct btrfs_block_group *cache) cache->cached == BTRFS_CACHE_ERROR; } +#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS +int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, + u64 physical, u64 **logical, int *naddrs, int *stripe_len); +#endif + #endif /* BTRFS_BLOCK_GROUP_H */ diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 9658c53eec7b..ab1b3c35c5eb 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6114,75 +6114,6 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1); } -int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, - u64 physical, u64 **logical, int *naddrs, int *stripe_len) -{ - struct extent_map *em; - struct map_lookup *map; - u64 *buf; - u64 bytenr; - u64 length; - u64 stripe_nr; - u64 rmap_len; - int i, j, nr = 0; - - em = btrfs_get_chunk_map(fs_info, chunk_start, 1); - if (IS_ERR(em)) - return -EIO; - - map = em->map_lookup; - length = em->len; - rmap_len = map->stripe_len; - - if (map->type & BTRFS_BLOCK_GROUP_RAID10) - length = div_u64(length, map->num_stripes / map->sub_stripes); - else if (map->type & BTRFS_BLOCK_GROUP_RAID0) - length = div_u64(length, map->num_stripes); - else if (map->type & 
BTRFS_BLOCK_GROUP_RAID56_MASK) { - length = div_u64(length, nr_data_stripes(map)); - rmap_len = map->stripe_len * nr_data_stripes(map); - } - - buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); - BUG_ON(!buf); /* -ENOMEM */ - - for (i = 0; i < map->num_stripes; i++) { - if (map->stripes[i].physical > physical || - map->stripes[i].physical + length <= physical) - continue; - - stripe_nr = physical - map->stripes[i].physical; - stripe_nr = div64_u64(stripe_nr, map->stripe_len); - - if (map->type & BTRFS_BLOCK_GROUP_RAID10) { - stripe_nr = stripe_nr * map->num_stripes + i; - stripe_nr = div_u64(stripe_nr, map->sub_stripes); - } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { - stripe_nr = stripe_nr * map->num_stripes + i; - } /* else if RAID[56], multiply by nr_data_stripes(). - * Alternatively, just use rmap_len below instead of - * map->stripe_len */ - - bytenr = chunk_start + stripe_nr * rmap_len; - WARN_ON(nr >= map->num_stripes); - for (j = 0; j < nr; j++) { - if (buf[j] == bytenr) - break; - } - if (j == nr) { - WARN_ON(nr >= map->num_stripes); - buf[nr++] = bytenr; - } - } - - *logical = buf; - *naddrs = nr; - *stripe_len = rmap_len; - - free_extent_map(em); - return 0; -} - static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio) { bio->bi_private = bbio->private; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 81f21e42b887..9c7d4fe5c39a 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -415,8 +415,6 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, struct btrfs_bio **bbio_ret); int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, u64 logical, u64 len, struct btrfs_io_geometry *io_geom); -int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, - u64 physical, u64 **logical, int *naddrs, int *stripe_len); int btrfs_read_sys_array(struct btrfs_fs_info *fs_info); int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info); int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type); -- cgit v1.2.3-59-g8ed1b From b3ad2c17fd80f2db3b824c28691adaaa4ffa2fc4 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 19 Nov 2019 14:05:51 +0200 Subject: btrfs: selftests: Add support for dummy devices Add basic infrastructure to create and link dummy btrfs_devices. This will be used in the pending btrfs_rmap_block test which deals with the block groups. Calling btrfs_alloc_dummy_device will link the newly created device to the passed fs_info and the test framework will free them once the test is finished. 
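A rough usage sketch of the new helper (a fragment, mirroring how the rmap self-test added later in this series uses it; fs_info is assumed to come from btrfs_alloc_dummy_fs_info()):

	struct btrfs_device *dev;

	dev = btrfs_alloc_dummy_device(fs_info);
	if (IS_ERR(dev)) {
		test_err("cannot allocate device");
		return PTR_ERR(dev);
	}
	/*
	 * Use the dummy device, e.g. map->stripes[i].dev = dev. No explicit
	 * free is needed: btrfs_free_dummy_fs_info() walks the device list
	 * and calls btrfs_free_dummy_device() for every dummy device.
	 */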
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tests/btrfs-tests.c | 29 +++++++++++++++++++++++++++++ fs/btrfs/tests/btrfs-tests.h | 1 + 2 files changed, 30 insertions(+) diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index a7aca4141788..c12b91ff5f56 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -86,6 +86,27 @@ static void btrfs_destroy_test_fs(void) unregister_filesystem(&test_type); } +struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info) +{ + struct btrfs_device *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL); + INIT_LIST_HEAD(&dev->dev_list); + list_add(&dev->dev_list, &fs_info->fs_devices->devices); + + return dev; +} + +static void btrfs_free_dummy_device(struct btrfs_device *dev) +{ + extent_io_tree_release(&dev->alloc_state); + kfree(dev); +} + struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize) { struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info), @@ -132,12 +153,14 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize) INIT_LIST_HEAD(&fs_info->dirty_qgroups); INIT_LIST_HEAD(&fs_info->dead_roots); INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); + INIT_LIST_HEAD(&fs_info->fs_devices->devices); INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); extent_io_tree_init(fs_info, &fs_info->freed_extents[0], IO_TREE_FS_INFO_FREED_EXTENTS0, NULL); extent_io_tree_init(fs_info, &fs_info->freed_extents[1], IO_TREE_FS_INFO_FREED_EXTENTS1, NULL); + extent_map_tree_init(&fs_info->mapping_tree); fs_info->pinned_extents = &fs_info->freed_extents[0]; set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state); @@ -150,6 +173,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) { struct radix_tree_iter iter; void **slot; + struct btrfs_device *dev, *tmp; if (!fs_info) return; @@ -180,6 +204,11 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) } spin_unlock(&fs_info->buffer_lock); + btrfs_mapping_tree_free(&fs_info->mapping_tree); + list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices, + dev_list) { + btrfs_free_dummy_device(dev); + } btrfs_free_qgroup_config(fs_info); btrfs_free_fs_roots(fs_info); cleanup_srcu_struct(&fs_info->subvol_srcu); diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h index 9e52527357d8..7a2d7ffbe30e 100644 --- a/fs/btrfs/tests/btrfs-tests.h +++ b/fs/btrfs/tests/btrfs-tests.h @@ -46,6 +46,7 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long lengt void btrfs_free_dummy_block_group(struct btrfs_block_group *cache); void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); +struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info); #else static inline int btrfs_run_sanity_tests(void) { -- cgit v1.2.3-59-g8ed1b From bf2e2eb060fa29dd892fa42dd0a03f130815acf0 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 10 Dec 2019 20:00:45 +0200 Subject: btrfs: Add self-tests for btrfs_rmap_block Add RAID1 and single testcases to verify that data stripes are excluded from super block locations and that the address mapping is valid. 
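As a rough worked example of the RAID1 vector below (numbers taken from the test vector; the second super block copy, btrfs_sb_offset(1), sits at physical 64MiB): the two data stripes start at physical 60MiB and 316MiB, the chunk starts at logical 4GiB, and the stripe length is 64KiB. The 64MiB super block falls inside the first stripe, so stripe_nr = (64M - 60M) / 64K = 64 and the mapped logical address is 4G + 64 * 64K = 4G + 4M, which is the expected mapped_logical value. In the SINGLE vector the only stripe covers physical [256MiB, 512MiB), so the 64MiB super block is out of range and no address is expected to be mapped.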
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba [ update changelog ] Signed-off-by: David Sterba --- fs/btrfs/tests/extent-map-tests.c | 154 +++++++++++++++++++++++++++++++++++++- 1 file changed, 153 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c index 4a7f796c9900..57379e96ccc9 100644 --- a/fs/btrfs/tests/extent-map-tests.c +++ b/fs/btrfs/tests/extent-map-tests.c @@ -6,6 +6,9 @@ #include #include "btrfs-tests.h" #include "../ctree.h" +#include "../volumes.h" +#include "../disk-io.h" +#include "../block-group.h" static void free_extent_map_tree(struct extent_map_tree *em_tree) { @@ -437,11 +440,153 @@ static int test_case_4(struct btrfs_fs_info *fs_info, return ret; } +struct rmap_test_vector { + u64 raid_type; + u64 physical_start; + u64 data_stripe_size; + u64 num_data_stripes; + u64 num_stripes; + /* Assume we won't have more than 5 physical stripes */ + u64 data_stripe_phys_start[5]; + bool expected_mapped_addr; + /* Physical to logical addresses */ + u64 mapped_logical[5]; +}; + +static int test_rmap_block(struct btrfs_fs_info *fs_info, + struct rmap_test_vector *test) +{ + struct extent_map *em; + struct map_lookup *map = NULL; + u64 *logical = NULL; + int i, out_ndaddrs, out_stripe_len; + int ret; + + em = alloc_extent_map(); + if (!em) { + test_std_err(TEST_ALLOC_EXTENT_MAP); + return -ENOMEM; + } + + map = kmalloc(map_lookup_size(test->num_stripes), GFP_KERNEL); + if (!map) { + kfree(em); + test_std_err(TEST_ALLOC_EXTENT_MAP); + return -ENOMEM; + } + + set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); + /* Start at 4GiB logical address */ + em->start = SZ_4G; + em->len = test->data_stripe_size * test->num_data_stripes; + em->block_len = em->len; + em->orig_block_len = test->data_stripe_size; + em->map_lookup = map; + + map->num_stripes = test->num_stripes; + map->stripe_len = BTRFS_STRIPE_LEN; + map->type = test->raid_type; + + for (i = 0; i < map->num_stripes; i++) { + struct btrfs_device *dev = btrfs_alloc_dummy_device(fs_info); + + if (IS_ERR(dev)) { + test_err("cannot allocate device"); + ret = PTR_ERR(dev); + goto out; + } + map->stripes[i].dev = dev; + map->stripes[i].physical = test->data_stripe_phys_start[i]; + } + + write_lock(&fs_info->mapping_tree.lock); + ret = add_extent_mapping(&fs_info->mapping_tree, em, 0); + write_unlock(&fs_info->mapping_tree.lock); + if (ret) { + test_err("error adding block group mapping to mapping tree"); + goto out_free; + } + + ret = btrfs_rmap_block(fs_info, em->start, btrfs_sb_offset(1), + &logical, &out_ndaddrs, &out_stripe_len); + if (ret || (out_ndaddrs == 0 && test->expected_mapped_addr)) { + test_err("didn't rmap anything but expected %d", + test->expected_mapped_addr); + goto out; + } + + if (out_stripe_len != BTRFS_STRIPE_LEN) { + test_err("calculated stripe length doesn't match"); + goto out; + } + + if (out_ndaddrs != test->expected_mapped_addr) { + for (i = 0; i < out_ndaddrs; i++) + test_msg("mapped %llu", logical[i]); + test_err("unexpected number of mapped addresses: %d", out_ndaddrs); + goto out; + } + + for (i = 0; i < out_ndaddrs; i++) { + if (logical[i] != test->mapped_logical[i]) { + test_err("unexpected logical address mapped"); + goto out; + } + } + + ret = 0; +out: + write_lock(&fs_info->mapping_tree.lock); + remove_extent_mapping(&fs_info->mapping_tree, em); + write_unlock(&fs_info->mapping_tree.lock); + /* For us */ + free_extent_map(em); +out_free: + /* For the tree */ + free_extent_map(em); + kfree(logical); + return ret; +} + int 
btrfs_test_extent_map(void) { struct btrfs_fs_info *fs_info = NULL; struct extent_map_tree *em_tree; - int ret = 0; + int ret = 0, i; + struct rmap_test_vector rmap_tests[] = { + { + /* + * Test a chunk with 2 data stripes one of which + * interesects the physical address of the super block + * is correctly recognised. + */ + .raid_type = BTRFS_BLOCK_GROUP_RAID1, + .physical_start = SZ_64M - SZ_4M, + .data_stripe_size = SZ_256M, + .num_data_stripes = 2, + .num_stripes = 2, + .data_stripe_phys_start = + {SZ_64M - SZ_4M, SZ_64M - SZ_4M + SZ_256M}, + .expected_mapped_addr = true, + .mapped_logical= {SZ_4G + SZ_4M} + }, + { + /* + * Test that out-of-range physical addresses are + * ignored + */ + + /* SINGLE chunk type */ + .raid_type = 0, + .physical_start = SZ_4G, + .data_stripe_size = SZ_256M, + .num_data_stripes = 1, + .num_stripes = 1, + .data_stripe_phys_start = {SZ_256M}, + .expected_mapped_addr = false, + .mapped_logical = {0} + } + }; test_msg("running extent_map tests"); @@ -474,6 +619,13 @@ int btrfs_test_extent_map(void) goto out; ret = test_case_4(fs_info, em_tree); + test_msg("running rmap tests"); + for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) { + ret = test_rmap_block(fs_info, &rmap_tests[i]); + if (ret) + goto out; + } + out: kfree(em_tree); btrfs_free_dummy_fs_info(fs_info); -- cgit v1.2.3-59-g8ed1b From 1776ad172e1ff09d4aa57daac74c56b4e1b0c636 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 19 Nov 2019 14:05:53 +0200 Subject: btrfs: Refactor btrfs_rmap_block to improve readability Move variables to appropriate scope. Remove last BUG_ON in the function and rework error handling accordingly. Make the duplicate detection code more straightforward. Use in_range macro. And give variables more descriptive name by explicitly distinguishing between IO stripe size (size recorded in the chunk item) and data stripe size (the size of an actual stripe, constituting a logical chunk/block group). 
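For reference (a sketch of the helper's behaviour, not part of this patch), the in_range() macro mentioned above is essentially a half-open interval check:

	/* true when b lies in [first, first + len) */
	#define in_range(b, first, len)	((b) >= (first) && (b) < (first) + (len))

so the open-coded pair of comparisons against map->stripes[i].physical collapses into a single readable range test.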
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 55 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 00cb40210b53..14851584e245 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1582,34 +1582,43 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, struct map_lookup *map; u64 *buf; u64 bytenr; - u64 length; - u64 stripe_nr; - u64 rmap_len; - int i, j, nr = 0; + u64 data_stripe_length; + u64 io_stripe_size; + int i, nr = 0; + int ret = 0; em = btrfs_get_chunk_map(fs_info, chunk_start, 1); if (IS_ERR(em)) return -EIO; map = em->map_lookup; - length = em->len; - rmap_len = map->stripe_len; + data_stripe_length = em->len; + io_stripe_size = map->stripe_len; if (map->type & BTRFS_BLOCK_GROUP_RAID10) - length = div_u64(length, map->num_stripes / map->sub_stripes); + data_stripe_length = div_u64(data_stripe_length, + map->num_stripes / map->sub_stripes); else if (map->type & BTRFS_BLOCK_GROUP_RAID0) - length = div_u64(length, map->num_stripes); + data_stripe_length = div_u64(data_stripe_length, map->num_stripes); else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { - length = div_u64(length, nr_data_stripes(map)); - rmap_len = map->stripe_len * nr_data_stripes(map); + data_stripe_length = div_u64(data_stripe_length, + nr_data_stripes(map)); + io_stripe_size = map->stripe_len * nr_data_stripes(map); } buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); - BUG_ON(!buf); /* -ENOMEM */ + if (!buf) { + ret = -ENOMEM; + goto out; + } for (i = 0; i < map->num_stripes; i++) { - if (map->stripes[i].physical > physical || - map->stripes[i].physical + length <= physical) + bool already_inserted = false; + u64 stripe_nr; + int j; + + if (!in_range(physical, map->stripes[i].physical, + data_stripe_length)) continue; stripe_nr = physical - map->stripes[i].physical; @@ -1627,24 +1636,26 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, * instead of map->stripe_len */ - bytenr = chunk_start + stripe_nr * rmap_len; - WARN_ON(nr >= map->num_stripes); + bytenr = chunk_start + stripe_nr * io_stripe_size; + + /* Ensure we don't add duplicate addresses */ for (j = 0; j < nr; j++) { - if (buf[j] == bytenr) + if (buf[j] == bytenr) { + already_inserted = true; break; + } } - if (j == nr) { - WARN_ON(nr >= map->num_stripes); + + if (!already_inserted) buf[nr++] = bytenr; - } } *logical = buf; *naddrs = nr; - *stripe_len = rmap_len; - + *stripe_len = io_stripe_size; +out: free_extent_map(em); - return 0; + return ret; } static int exclude_super_stripes(struct btrfs_block_group *cache) -- cgit v1.2.3-59-g8ed1b From 668e48af7a94985be7cf3b97a860d9819271d8bf Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Mon, 6 Jan 2020 19:38:31 +0800 Subject: btrfs: sysfs, add devid/dev_state kobject and device attributes New sysfs attributes that track the filesystem status of devices, stored in the per-filesystem directory in /sys/fs/btrfs/FSID/devinfo . There's a directory for each device, with name corresponding to the numerical device id. in_fs_metadata - device is in the list of fs metadata missing - device is missing (no device node or block device) replace_target - device is target of replace writeable - writes from fs are allowed These attributes reflect the state of the device::dev_state and created at mount time. 
Sample output: $ pwd /sys/fs/btrfs/6e1961f1-5918-4ecc-a22f-948897b409f7/devinfo/1/ $ ls in_fs_metadata missing replace_target writeable $ cat missing 0 The output from these attributes are 0 or 1. 0 indicates unset and 1 indicates set. These attributes are readonly. It is observed that the device delete thread and sysfs read thread will not race because the delete thread calls sysfs kobject_put() which in turn waits for existing sysfs read to complete. Note for device replace devid swap: During the replace the target device temporarily assumes devid 0 before assigning the devid of the soruce device. In btrfs_dev_replace_finishing() we remove source sysfs devid using the function btrfs_sysfs_remove_devices_attr(), so after that call kobject_rename() to update the devid in the sysfs. This adds and calls btrfs_sysfs_update_devid() helper function to update the device id. Signed-off-by: Anand Jain Reviewed-by: David Sterba [ update changelog ] Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 1 + fs/btrfs/sysfs.c | 155 +++++++++++++++++++++++++++++++++++++++++-------- fs/btrfs/sysfs.h | 1 + fs/btrfs/volumes.h | 4 ++ 4 files changed, 138 insertions(+), 23 deletions(-) diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index f639dde2a679..8600beb9c086 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -707,6 +707,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, /* replace the sysfs entry */ btrfs_sysfs_rm_device_link(fs_info->fs_devices, src_device); + btrfs_sysfs_update_devid(tgt_device); btrfs_rm_dev_replace_free_srcdev(src_device); /* write back the superblocks */ diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 55e4ed1af29c..7436422194da 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -1152,29 +1152,117 @@ int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, if (!fs_devices->devices_kobj) return -EINVAL; - if (one_device && one_device->bdev) { - disk = one_device->bdev->bd_part; - disk_kobj = &part_to_dev(disk)->kobj; + if (one_device) { + if (one_device->bdev) { + disk = one_device->bdev->bd_part; + disk_kobj = &part_to_dev(disk)->kobj; + sysfs_remove_link(fs_devices->devices_kobj, + disk_kobj->name); + } - sysfs_remove_link(fs_devices->devices_kobj, disk_kobj->name); - } + kobject_del(&one_device->devid_kobj); + kobject_put(&one_device->devid_kobj); + + wait_for_completion(&one_device->kobj_unregister); - if (one_device) return 0; + } - list_for_each_entry(one_device, - &fs_devices->devices, dev_list) { - if (!one_device->bdev) - continue; - disk = one_device->bdev->bd_part; - disk_kobj = &part_to_dev(disk)->kobj; + list_for_each_entry(one_device, &fs_devices->devices, dev_list) { + + if (one_device->bdev) { + disk = one_device->bdev->bd_part; + disk_kobj = &part_to_dev(disk)->kobj; + sysfs_remove_link(fs_devices->devices_kobj, + disk_kobj->name); + } + kobject_del(&one_device->devid_kobj); + kobject_put(&one_device->devid_kobj); - sysfs_remove_link(fs_devices->devices_kobj, disk_kobj->name); + wait_for_completion(&one_device->kobj_unregister); } return 0; } +static ssize_t btrfs_devinfo_in_fs_metadata_show(struct kobject *kobj, + struct kobj_attribute *a, + char *buf) +{ + int val; + struct btrfs_device *device = container_of(kobj, struct btrfs_device, + devid_kobj); + + val = !!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); + + return snprintf(buf, PAGE_SIZE, "%d\n", val); +} +BTRFS_ATTR(devid, in_fs_metadata, btrfs_devinfo_in_fs_metadata_show); + +static ssize_t 
btrfs_sysfs_missing_show(struct kobject *kobj, + struct kobj_attribute *a, char *buf) +{ + int val; + struct btrfs_device *device = container_of(kobj, struct btrfs_device, + devid_kobj); + + val = !!test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); + + return snprintf(buf, PAGE_SIZE, "%d\n", val); +} +BTRFS_ATTR(devid, missing, btrfs_sysfs_missing_show); + +static ssize_t btrfs_devinfo_replace_target_show(struct kobject *kobj, + struct kobj_attribute *a, + char *buf) +{ + int val; + struct btrfs_device *device = container_of(kobj, struct btrfs_device, + devid_kobj); + + val = !!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); + + return snprintf(buf, PAGE_SIZE, "%d\n", val); +} +BTRFS_ATTR(devid, replace_target, btrfs_devinfo_replace_target_show); + +static ssize_t btrfs_devinfo_writeable_show(struct kobject *kobj, + struct kobj_attribute *a, char *buf) +{ + int val; + struct btrfs_device *device = container_of(kobj, struct btrfs_device, + devid_kobj); + + val = !!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); + + return snprintf(buf, PAGE_SIZE, "%d\n", val); +} +BTRFS_ATTR(devid, writeable, btrfs_devinfo_writeable_show); + +static struct attribute *devid_attrs[] = { + BTRFS_ATTR_PTR(devid, in_fs_metadata), + BTRFS_ATTR_PTR(devid, missing), + BTRFS_ATTR_PTR(devid, replace_target), + BTRFS_ATTR_PTR(devid, writeable), + NULL +}; +ATTRIBUTE_GROUPS(devid); + +static void btrfs_release_devid_kobj(struct kobject *kobj) +{ + struct btrfs_device *device = container_of(kobj, struct btrfs_device, + devid_kobj); + + memset(&device->devid_kobj, 0, sizeof(struct kobject)); + complete(&device->kobj_unregister); +} + +static struct kobj_type devid_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = devid_groups, + .release = btrfs_release_devid_kobj, +}; + int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, struct btrfs_device *one_device) { @@ -1182,22 +1270,31 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, struct btrfs_device *dev; list_for_each_entry(dev, &fs_devices->devices, dev_list) { - struct hd_struct *disk; - struct kobject *disk_kobj; - - if (!dev->bdev) - continue; if (one_device && one_device != dev) continue; - disk = dev->bdev->bd_part; - disk_kobj = &part_to_dev(disk)->kobj; + if (dev->bdev) { + struct hd_struct *disk; + struct kobject *disk_kobj; + + disk = dev->bdev->bd_part; + disk_kobj = &part_to_dev(disk)->kobj; + + error = sysfs_create_link(fs_devices->devices_kobj, + disk_kobj, disk_kobj->name); + if (error) + break; + } - error = sysfs_create_link(fs_devices->devices_kobj, - disk_kobj, disk_kobj->name); - if (error) + init_completion(&dev->kobj_unregister); + error = kobject_init_and_add(&dev->devid_kobj, &devid_ktype, + fs_devices->devices_kobj, "%llu", + dev->devid); + if (error) { + kobject_put(&dev->devid_kobj); break; + } } return error; @@ -1229,6 +1326,18 @@ void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices, "sysfs: failed to create fsid for sprout"); } +void btrfs_sysfs_update_devid(struct btrfs_device *device) +{ + char tmp[24]; + + snprintf(tmp, sizeof(tmp), "%llu", device->devid); + + if (kobject_rename(&device->devid_kobj, tmp)) + btrfs_warn(device->fs_devices->fs_info, + "sysfs: failed to update devid for %llu", + device->devid); +} + /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 3d27b39eaf94..c68582add92e 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -34,5 +34,6 @@ void 
btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache); int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info); void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info); +void btrfs_sysfs_update_devid(struct btrfs_device *device); #endif diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 9c7d4fe5c39a..690d4f5a0653 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -136,6 +136,10 @@ struct btrfs_device { atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX]; struct extent_io_tree alloc_state; + + struct completion kobj_unregister; + /* For sysfs/FSID/devinfo/devid/ */ + struct kobject devid_kobj; }; /* -- cgit v1.2.3-59-g8ed1b From 889bfa39086e86b52fcfaa04d72c95eaeb12f9a5 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 17 Jan 2020 09:12:45 -0500 Subject: btrfs: drop log root for dropped roots If we fsync on a subvolume and create a log root for that volume, and then later delete that subvolume we'll never clean up its log root. Fix this by making switch_commit_roots free the log for any dropped roots we encounter. The extra churn is because we need a btrfs_trans_handle, not the btrfs_transaction. CC: stable@vger.kernel.org # 5.4+ Reviewed-by: Filipe Manana Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/transaction.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index cfc08ef9b876..55d8fd68775a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -147,13 +147,14 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction) } } -static noinline void switch_commit_roots(struct btrfs_transaction *trans) +static noinline void switch_commit_roots(struct btrfs_trans_handle *trans) { + struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root, *tmp; down_write(&fs_info->commit_root_sem); - list_for_each_entry_safe(root, tmp, &trans->switch_commits, + list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits, dirty_list) { list_del_init(&root->dirty_list); free_extent_buffer(root->commit_root); @@ -165,16 +166,17 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans) } /* We can free old roots now. 
*/ - spin_lock(&trans->dropped_roots_lock); - while (!list_empty(&trans->dropped_roots)) { - root = list_first_entry(&trans->dropped_roots, + spin_lock(&cur_trans->dropped_roots_lock); + while (!list_empty(&cur_trans->dropped_roots)) { + root = list_first_entry(&cur_trans->dropped_roots, struct btrfs_root, root_list); list_del_init(&root->root_list); - spin_unlock(&trans->dropped_roots_lock); + spin_unlock(&cur_trans->dropped_roots_lock); + btrfs_free_log(trans, root); btrfs_drop_and_free_fs_root(fs_info, root); - spin_lock(&trans->dropped_roots_lock); + spin_lock(&cur_trans->dropped_roots_lock); } - spin_unlock(&trans->dropped_roots_lock); + spin_unlock(&cur_trans->dropped_roots_lock); up_write(&fs_info->commit_root_sem); } @@ -1421,7 +1423,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans, ret = commit_cowonly_roots(trans); if (ret) goto out; - switch_commit_roots(trans->transaction); + switch_commit_roots(trans); ret = btrfs_write_and_wait_transaction(trans); if (ret) btrfs_handle_fs_error(fs_info, ret, @@ -2301,7 +2303,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) list_add_tail(&fs_info->chunk_root->dirty_list, &cur_trans->switch_commits); - switch_commit_roots(cur_trans); + switch_commit_roots(trans); ASSERT(list_empty(&cur_trans->dirty_bgs)); ASSERT(list_empty(&cur_trans->io_bgs)); -- cgit v1.2.3-59-g8ed1b From d62b23c94952e78211a383b7d90ef0afbd9a3717 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 17 Jan 2020 08:57:51 -0500 Subject: btrfs: set trans->drity in btrfs_commit_transaction If we abort a transaction we have the following sequence if (!trans->dirty && list_empty(&trans->new_bgs)) return; WRITE_ONCE(trans->transaction->aborted, err); The idea being if we didn't modify anything with our trans handle then we don't really need to abort the whole transaction, maybe the other trans handles are fine and we can carry on. However in the case of create_snapshot we add a pending_snapshot object to our transaction and then commit the transaction. We don't actually modify anything. sync() behaves the same way, attach to an existing transaction and commit it. This means that if we have an IO error in the right places we could abort the committing transaction with our trans->dirty being not set and thus not set transaction->aborted. This is a problem because in the create_snapshot() case we depend on pending->error being set to something, or btrfs_commit_transaction returning an error. If we are not the trans handle that gets to commit the transaction, and we're waiting on the commit to happen we get our return value from cur_trans->aborted. If this was not set to anything because sync() hit an error in the transaction commit before it could modify anything then cur_trans->aborted would be 0. Thus we'd return 0 from btrfs_commit_transaction() in create_snapshot. This is a problem because we then try to do things with pending_snapshot->snap, which will be NULL because we didn't create the snapshot, and then we'll get a NULL pointer dereference like the following "BUG: kernel NULL pointer dereference, address: 00000000000001f0" RIP: 0010:btrfs_orphan_cleanup+0x2d/0x330 Call Trace: ? btrfs_mksubvol.isra.31+0x3f2/0x510 btrfs_mksubvol.isra.31+0x4bc/0x510 ? __sb_start_write+0xfa/0x200 ? mnt_want_write_file+0x24/0x50 btrfs_ioctl_snap_create_transid+0x16c/0x1a0 btrfs_ioctl_snap_create_v2+0x11e/0x1a0 btrfs_ioctl+0x1534/0x2c10 ? free_debug_processing+0x262/0x2a3 do_vfs_ioctl+0xa6/0x6b0 ? do_sys_open+0x188/0x220 ? 
syscall_trace_enter+0x1f8/0x330 ksys_ioctl+0x60/0x90 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x4a/0x1b0 In order to fix this we need to make sure anybody who calls commit_transaction has trans->dirty set so that they properly set the trans->transaction->aborted value properly so any waiters know bad things happened. This was found while I was running generic/475 with my modified fsstress, it reproduced within a few runs. I ran with this patch all night and didn't see the problem again. CC: stable@vger.kernel.org # 4.4+ Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/transaction.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 55d8fd68775a..33dcc88b428a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -2015,6 +2015,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) ASSERT(refcount_read(&trans->use_count) == 1); + /* + * Some places just start a transaction to commit it. We need to make + * sure that if this commit fails that the abort code actually marks the + * transaction as failed, so set trans->dirty to make the abort code do + * the right thing. + */ + trans->dirty = true; + /* Stop the commit early if ->aborted is set */ if (unlikely(READ_ONCE(cur_trans->aborted))) { ret = cur_trans->aborted; -- cgit v1.2.3-59-g8ed1b From b5e4ff9d465da1233a2d9a47ebce487c70d8f4ab Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 15 Jan 2020 13:21:35 +0000 Subject: Btrfs: fix infinite loop during fsync after rename operations Recently fsstress (from fstests) sporadically started to trigger an infinite loop during fsync operations. This turned out to be because support for the rename exchange and whiteout operations was added to fsstress in fstests. These operations, unlike any others in fsstress, cause file names to be reused, whence triggering this issue. However it's not necessary to use rename exchange and rename whiteout operations trigger this issue, simple rename operations and file creations are enough to trigger the issue. The issue boils down to when we are logging inodes that conflict (that had the name of any inode we need to log during the fsync operation), we keep logging them even if they were already logged before, and after that we check if there's any other inode that conflicts with them and then add it again to the list of inodes to log. Skipping already logged inodes fixes the issue. Consider the following example: $ mkfs.btrfs -f /dev/sdb $ mount /dev/sdb /mnt $ mkdir /mnt/testdir # inode 257 $ touch /mnt/testdir/zz # inode 258 $ ln /mnt/testdir/zz /mnt/testdir/zz_link $ touch /mnt/testdir/a # inode 259 $ sync # The following 3 renames achieve the same result as a rename exchange # operation ( /mnt/testdir/zz_link to /mnt/testdir/a). $ mv /mnt/testdir/a /mnt/testdir/a/tmp $ mv /mnt/testdir/zz_link /mnt/testdir/a $ mv /mnt/testdir/a/tmp /mnt/testdir/zz_link # The following rename and file creation give the same result as a # rename whiteout operation ( zz to a2). 
$ mv /mnt/testdir/zz /mnt/testdir/a2 $ touch /mnt/testdir/zz # inode 260 $ xfs_io -c fsync /mnt/testdir/zz --> results in the infinite loop The following steps happen: 1) When logging inode 260, we find that its reference named "zz" was used by inode 258 in the previous transaction (through the commit root), so inode 258 is added to the list of conflicting indoes that need to be logged; 2) After logging inode 258, we find that its reference named "a" was used by inode 259 in the previous transaction, and therefore we add inode 259 to the list of conflicting inodes to be logged; 3) After logging inode 259, we find that its reference named "zz_link" was used by inode 258 in the previous transaction - we add inode 258 to the list of conflicting inodes to log, again - we had already logged it before at step 3. After logging it again, we find again that inode 259 conflicts with him, and we add again 259 to the list, etc - we end up repeating all the previous steps. So fix this by skipping logging of conflicting inodes that were already logged. Fixes: 6b5fc433a7ad67 ("Btrfs: fix fsync after succession of renames of different files") CC: stable@vger.kernel.org # 5.1+ Signed-off-by: Filipe Manana Reviewed-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index a2bae5c230e1..7dd7552f53a4 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4822,6 +4822,50 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, } continue; } + /* + * If the inode was already logged skip it - otherwise we can + * hit an infinite loop. Example: + * + * From the commit root (previous transaction) we have the + * following inodes: + * + * inode 257 a directory + * inode 258 with references "zz" and "zz_link" on inode 257 + * inode 259 with reference "a" on inode 257 + * + * And in the current (uncommitted) transaction we have: + * + * inode 257 a directory, unchanged + * inode 258 with references "a" and "a2" on inode 257 + * inode 259 with reference "zz_link" on inode 257 + * inode 261 with reference "zz" on inode 257 + * + * When logging inode 261 the following infinite loop could + * happen if we don't skip already logged inodes: + * + * - we detect inode 258 as a conflicting inode, with inode 261 + * on reference "zz", and log it; + * + * - we detect inode 259 as a conflicting inode, with inode 258 + * on reference "a", and log it; + * + * - we detect inode 258 as a conflicting inode, with inode 259 + * on reference "zz_link", and log it - again! After this we + * repeat the above steps forever. + */ + spin_lock(&BTRFS_I(inode)->lock); + /* + * Check the inode's logged_trans only instead of + * btrfs_inode_in_log(). This is because the last_log_commit of + * the inode is not updated when we only log that it exists and + * and it has the full sync bit set (see btrfs_log_inode()). + */ + if (BTRFS_I(inode)->logged_trans == trans->transid) { + spin_unlock(&BTRFS_I(inode)->lock); + btrfs_add_delayed_iput(inode); + continue; + } + spin_unlock(&BTRFS_I(inode)->lock); /* * We are safe logging the other inode without acquiring its * lock as long as we log with the LOG_INODE_EXISTS mode. 
We -- cgit v1.2.3-59-g8ed1b From c0d81c7cb236782d674ae5701d1a237949e47a7a Mon Sep 17 00:00:00 2001 From: Su Yue Date: Fri, 10 Jan 2020 14:11:32 +0200 Subject: btrfs: Call find_fsid from find_fsid_inprogress Since find_fsid_inprogress should also handle the case in which an fs didn't change its FSID make it call find_fsid directly. This makes the code in device_list_add simpler by eliminating a conditional call of find_fsid. No functional changes. Reviewed-by: Josef Bacik Signed-off-by: Su Yue Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ab1b3c35c5eb..d9957688338b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -672,7 +672,9 @@ error_brelse: /* * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices - * being created with a disk that has already completed its fsid change. + * being created with a disk that has already completed its fsid change. Such + * disk can belong to an fs which has its FSID changed or to one which doesn't. + * Handle both cases here. */ static struct btrfs_fs_devices *find_fsid_inprogress( struct btrfs_super_block *disk_super) @@ -688,7 +690,7 @@ static struct btrfs_fs_devices *find_fsid_inprogress( } } - return NULL; + return find_fsid(disk_super->fsid, NULL); } @@ -737,19 +739,10 @@ static noinline struct btrfs_device *device_list_add(const char *path, BTRFS_SUPER_FLAG_CHANGING_FSID_V2); if (fsid_change_in_progress) { - if (!has_metadata_uuid) { - /* - * When we have an image which has CHANGING_FSID_V2 set - * it might belong to either a filesystem which has - * disks with completed fsid change or it might belong - * to fs with no UUID changes in effect, handle both. - */ + if (!has_metadata_uuid) fs_devices = find_fsid_inprogress(disk_super); - if (!fs_devices) - fs_devices = find_fsid(disk_super->fsid, NULL); - } else { + else fs_devices = find_fsid_changed(disk_super); - } } else if (has_metadata_uuid) { fs_devices = find_fsid(disk_super->fsid, disk_super->metadata_uuid); -- cgit v1.2.3-59-g8ed1b From c6730a0e5778bf2d83189f97ab5e3efe61f6aad1 Mon Sep 17 00:00:00 2001 From: Su Yue Date: Fri, 10 Jan 2020 14:11:33 +0200 Subject: btrfs: Factor out metadata_uuid code from find_fsid. find_fsid became rather hairy with the introduction of metadata uuid changing feature. Alleviate this by factoring out the metadata uuid specific code in a dedicated function which deals with finding correct fsid for a device with changed uuid. Reviewed-by: Josef Bacik Signed-off-by: Su Yue Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 77 +++++++++++++++++++++++++++++------------------------- 1 file changed, 42 insertions(+), 35 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d9957688338b..d48274451452 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -441,39 +441,6 @@ static noinline struct btrfs_fs_devices *find_fsid( ASSERT(fsid); - if (metadata_fsid) { - /* - * Handle scanned device having completed its fsid change but - * belonging to a fs_devices that was created by first scanning - * a device which didn't have its fsid/metadata_uuid changed - * at all and the CHANGING_FSID_V2 flag set. 
- */ - list_for_each_entry(fs_devices, &fs_uuids, fs_list) { - if (fs_devices->fsid_change && - memcmp(metadata_fsid, fs_devices->fsid, - BTRFS_FSID_SIZE) == 0 && - memcmp(fs_devices->fsid, fs_devices->metadata_uuid, - BTRFS_FSID_SIZE) == 0) { - return fs_devices; - } - } - /* - * Handle scanned device having completed its fsid change but - * belonging to a fs_devices that was created by a device that - * has an outdated pair of fsid/metadata_uuid and - * CHANGING_FSID_V2 flag set. - */ - list_for_each_entry(fs_devices, &fs_uuids, fs_list) { - if (fs_devices->fsid_change && - memcmp(fs_devices->metadata_uuid, - fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && - memcmp(metadata_fsid, fs_devices->metadata_uuid, - BTRFS_FSID_SIZE) == 0) { - return fs_devices; - } - } - } - /* Handle non-split brain cases */ list_for_each_entry(fs_devices, &fs_uuids, fs_list) { if (metadata_fsid) { @@ -489,6 +456,47 @@ static noinline struct btrfs_fs_devices *find_fsid( return NULL; } +static struct btrfs_fs_devices *find_fsid_with_metadata_uuid( + struct btrfs_super_block *disk_super) +{ + + struct btrfs_fs_devices *fs_devices; + + /* + * Handle scanned device having completed its fsid change but + * belonging to a fs_devices that was created by first scanning + * a device which didn't have its fsid/metadata_uuid changed + * at all and the CHANGING_FSID_V2 flag set. + */ + list_for_each_entry(fs_devices, &fs_uuids, fs_list) { + if (fs_devices->fsid_change && + memcmp(disk_super->metadata_uuid, fs_devices->fsid, + BTRFS_FSID_SIZE) == 0 && + memcmp(fs_devices->fsid, fs_devices->metadata_uuid, + BTRFS_FSID_SIZE) == 0) { + return fs_devices; + } + } + /* + * Handle scanned device having completed its fsid change but + * belonging to a fs_devices that was created by a device that + * has an outdated pair of fsid/metadata_uuid and + * CHANGING_FSID_V2 flag set. + */ + list_for_each_entry(fs_devices, &fs_uuids, fs_list) { + if (fs_devices->fsid_change && + memcmp(fs_devices->metadata_uuid, + fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && + memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid, + BTRFS_FSID_SIZE) == 0) { + return fs_devices; + } + } + + return find_fsid(disk_super->fsid, disk_super->metadata_uuid); +} + + static int btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, int flush, struct block_device **bdev, @@ -744,8 +752,7 @@ static noinline struct btrfs_device *device_list_add(const char *path, else fs_devices = find_fsid_changed(disk_super); } else if (has_metadata_uuid) { - fs_devices = find_fsid(disk_super->fsid, - disk_super->metadata_uuid); + fs_devices = find_fsid_with_metadata_uuid(disk_super); } else { fs_devices = find_fsid(disk_super->fsid, NULL); } -- cgit v1.2.3-59-g8ed1b From 05840710149c7d1a78ea85a2db5723f706e97d8f Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 10 Jan 2020 14:11:34 +0200 Subject: btrfs: Handle another split brain scenario with metadata uuid feature There is one more cases which isn't handled by the original metadata uuid work. Namely, when a filesystem has METADATA_UUID incompat bit and the user decides to change the FSID to the original one e.g. have metadata_uuid and fsid match. In case of power failure while this operation is in progress we could end up in a situation where some of the disks have the incompat bit removed and the other half have both METADATA_UUID_INCOMPAT and FSID_CHANGING_IN_PROGRESS flags. 
This patch handles the case where a disk that has successfully changed its FSID such that it equals METADATA_UUID is scanned first. Subsequently when a disk with both METADATA_UUID_INCOMPAT/FSID_CHANGING_IN_PROGRESS flags is scanned find_fsid_changed won't be able to find an appropriate btrfs_fs_devices. This is done by extending find_fsid_changed to correctly find btrfs_fs_devices whose metadata_uuid/fsid are the same and they match the metadata_uuid of the currently scanned device. Fixes: cc5de4e70256 ("btrfs: Handle final split-brain possibility during fsid change") Reviewed-by: Josef Bacik Reported-by: Su Yue Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d48274451452..7a30dd8ffdb5 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -710,17 +710,28 @@ static struct btrfs_fs_devices *find_fsid_changed( /* * Handles the case where scanned device is part of an fs that had * multiple successful changes of FSID but curently device didn't - * observe it. Meaning our fsid will be different than theirs. + * observe it. Meaning our fsid will be different than theirs. We need + * to handle two subcases : + * 1 - The fs still continues to have different METADATA/FSID uuids. + * 2 - The fs is switched back to its original FSID (METADATA/FSID + * are equal). */ list_for_each_entry(fs_devices, &fs_uuids, fs_list) { + /* Changed UUIDs */ if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid, BTRFS_FSID_SIZE) == 0 && memcmp(fs_devices->fsid, disk_super->fsid, - BTRFS_FSID_SIZE) != 0) { + BTRFS_FSID_SIZE) != 0) + return fs_devices; + + /* Unchanged UUIDs */ + if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, + BTRFS_FSID_SIZE) == 0 && + memcmp(fs_devices->fsid, disk_super->metadata_uuid, + BTRFS_FSID_SIZE) == 0) return fs_devices; - } } return NULL; -- cgit v1.2.3-59-g8ed1b From 1362089d2ad7e20d16371b39d3c11990d4ec23e4 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 10 Jan 2020 14:11:35 +0200 Subject: btrfs: Fix split-brain handling when changing FSID to metadata uuid Current code doesn't correctly handle the situation which arises when a file system that has METADATA_UUID_INCOMPAT flag set and has its FSID changed to the one in metadata uuid. This causes the incompat flag to disappear. In case of a power failure we could end up in a situation where part of the disks in a multi-disk filesystem are correctly reverted to METADATA_UUID_INCOMPAT flag unset state, while others have METADATA_UUID_INCOMPAT set and CHANGING_FSID_V2_IN_PROGRESS. This patch corrects the behavior required to handle the case where a disk of the second type is scanned first, creating the necessary btrfs_fs_devices. Subsequently, when a disk which has already completed the transition is scanned it should overwrite the data in btrfs_fs_devices. 
Reported-by: Su Yue Reviewed-by: Josef Bacik Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 42 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 7a30dd8ffdb5..9cfc668f91f4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -736,6 +736,32 @@ static struct btrfs_fs_devices *find_fsid_changed( return NULL; } + +static struct btrfs_fs_devices *find_fsid_reverted_metadata( + struct btrfs_super_block *disk_super) +{ + struct btrfs_fs_devices *fs_devices; + + /* + * Handle the case where the scanned device is part of an fs whose last + * metadata UUID change reverted it to the original FSID. At the same + * time * fs_devices was first created by another constitutent device + * which didn't fully observe the operation. This results in an + * btrfs_fs_devices created with metadata/fsid different AND + * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the + * fs_devices equal to the FSID of the disk. + */ + list_for_each_entry(fs_devices, &fs_uuids, fs_list) { + if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid, + BTRFS_FSID_SIZE) != 0 && + memcmp(fs_devices->metadata_uuid, disk_super->fsid, + BTRFS_FSID_SIZE) == 0 && + fs_devices->fsid_change) + return fs_devices; + } + + return NULL; +} /* * Add new device to list of registered devices * @@ -765,7 +791,9 @@ static noinline struct btrfs_device *device_list_add(const char *path, } else if (has_metadata_uuid) { fs_devices = find_fsid_with_metadata_uuid(disk_super); } else { - fs_devices = find_fsid(disk_super->fsid, NULL); + fs_devices = find_fsid_reverted_metadata(disk_super); + if (!fs_devices) + fs_devices = find_fsid(disk_super->fsid, NULL); } @@ -795,12 +823,18 @@ static noinline struct btrfs_device *device_list_add(const char *path, * a device which had the CHANGING_FSID_V2 flag then replace the * metadata_uuid/fsid values of the fs_devices. */ - if (has_metadata_uuid && fs_devices->fsid_change && + if (fs_devices->fsid_change && found_transid > fs_devices->latest_generation) { memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE); - memcpy(fs_devices->metadata_uuid, - disk_super->metadata_uuid, BTRFS_FSID_SIZE); + + if (has_metadata_uuid) + memcpy(fs_devices->metadata_uuid, + disk_super->metadata_uuid, + BTRFS_FSID_SIZE); + else + memcpy(fs_devices->metadata_uuid, + disk_super->fsid, BTRFS_FSID_SIZE); fs_devices->fsid_change = false; } -- cgit v1.2.3-59-g8ed1b From 4e19443da1941050b346f8fc4c368aa68413bc88 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 21 Jan 2020 09:17:06 -0500 Subject: btrfs: free block groups after free'ing fs trees Sometimes when running generic/475 we would trip the WARN_ON(cache->reserved) check when free'ing the block groups on umount. This is because sometimes we don't commit the transaction because of IO errors and thus do not cleanup the tree logs until at umount time. These blocks are still reserved until they are cleaned up, but they aren't cleaned up until _after_ we do the free block groups work. Fix this by moving the free after free'ing the fs roots, that way all of the tree logs are cleaned up and we have a properly cleaned fs. A bunch of loops of generic/475 confirmed this fixes the problem. 
CC: stable@vger.kernel.org # 4.9+ Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5ce2801f8388..aea48d6ddc0c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4030,11 +4030,18 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) invalidate_inode_pages2(fs_info->btree_inode->i_mapping); btrfs_stop_all_workers(fs_info); - btrfs_free_block_groups(fs_info); - clear_bit(BTRFS_FS_OPEN, &fs_info->flags); free_root_pointers(fs_info, true); + /* + * We must free the block groups after dropping the fs_roots as we could + * have had an IO error and have left over tree log blocks that aren't + * cleaned up until the fs roots are freed. This makes the block group + * accounting appear to be wrong because there's pending reserved bytes, + * so make sure we do the block group cleanup afterwards. + */ + btrfs_free_block_groups(fs_info); + iput(fs_info->btree_inode); #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY -- cgit v1.2.3-59-g8ed1b