author		Linus Torvalds <torvalds@linux-foundation.org>	2019-05-07 11:34:19 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-07 11:34:19 -0700
commit		9f2e3a53f7ec9ef55e9d01bc29a6285d291c151e (patch)
tree		c25b0eb20dac1a39a6b55c521b2658dcceb7d532 /fs/btrfs/ref-verify.c
parent		Merge branch 'stable-fodder' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs (diff)
parent		btrfs: Use kvmalloc for allocating compressed path context (diff)
Merge tag 'for-5.2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
 "This time the majority of changes are cleanups, though there's still
  a number of changes of user interest.

  User visible changes:

   - better read time and write checks to catch errors early and
     before writing data to disk (to catch potential memory corruption
     on data that get checksummed)

   - qgroups + metadata relocation: last speed up patch in the series
     to address the slowness, there should be no overhead comparing
     balance with and without qgroups

   - FIEMAP ioctl does not start a transaction unnecessarily, this can
     result in a speed up and less blocking due to IO

   - LOGICAL_INO (v1, v2) does not start transaction unnecessarily,
     this can speed up the mentioned ioctl and scrub as well

   - fsync on files with many (but not too many) hardlinks is faster,
     finer decision if the links should be fsynced individually or
     completely

   - send tries harder to find ranges to clone

   - trim/discard will skip unallocated chunks that haven't been
     touched since the last mount

  Fixes:

   - send flushes delayed allocation before start, otherwise it could
     miss some changes in case of a very recent rw->ro switch of a
     subvolume

   - fix fallocate with qgroups that could lead to space accounting
     underflow, reported as a warning

   - trim/discard ioctl honours the requested range

   - starting send and dedupe on a subvolume at the same time will let
     only one of them succeed, this is to prevent changes that send
     could miss due to dedupe; both operations are restartable

  Core changes:

   - more tree-checker validations, errors reported by fuzzing tools:
      - device item
      - inode item
      - block group profiles

   - tracepoints for extent buffer locking

   - async cow preallocates memory to avoid errors happening too deep
     in the call chain

   - metadata reservations for delalloc reworked to better adapt in
     many-writers/low-space scenarios

   - improved space flushing logic for intense DIO vs buffered
     workloads

   - lots of cleanups
      - removed unused struct members
      - redundant argument removal
      - properties and xattrs
      - extent buffer locking
      - selftests
      - use common file type conversions
      - many-argument functions reduction"

* tag 'for-5.2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (227 commits)
  btrfs: Use kvmalloc for allocating compressed path context
  btrfs: Factor out common extent locking code in submit_compressed_extents
  btrfs: Set io_tree only once in submit_compressed_extents
  btrfs: Replace clear_extent_bit with unlock_extent
  btrfs: Make compress_file_range take only struct async_chunk
  btrfs: Remove fs_info from struct async_chunk
  btrfs: Rename async_cow to async_chunk
  btrfs: Preallocate chunks in cow_file_range_async
  btrfs: reserve delalloc metadata differently
  btrfs: track DIO bytes in flight
  btrfs: merge calls of btrfs_setxattr and btrfs_setxattr_trans in btrfs_set_prop
  btrfs: delete unused function btrfs_set_prop_trans
  btrfs: start transaction in xattr_handler_set_prop
  btrfs: drop local copy of inode i_mode
  btrfs: drop old_fsflags in btrfs_ioctl_setflags
  btrfs: modify local copy of btrfs_inode flags
  btrfs: drop useless inode i_flags copy and restore
  btrfs: start transaction in btrfs_ioctl_setflags()
  btrfs: export btrfs_set_prop
  btrfs: refactor btrfs_set_props to validate externally
  ...
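The diff below is one small piece of the "many-argument functions reduction" work: btrfs_ref_tree_mod() trades its seven scalar arguments for a single struct btrfs_ref. As a rough sketch of what this means for a metadata call site, assuming the btrfs_init_generic_ref()/btrfs_init_tree_ref() helpers added in the same series (actual call sites in the tree may differ):

	/* Before: every parameter passed individually. */
	ret = btrfs_ref_tree_mod(root, bytenr, num_bytes, parent,
				 root->root_key.objectid, level, 0,
				 BTRFS_ADD_DELAYED_REF);

	/* After: the caller fills one btrfs_ref and hands it over. */
	struct btrfs_ref generic_ref = { 0 };

	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF,
			       bytenr, num_bytes, parent);
	btrfs_init_tree_ref(&generic_ref, level, root->root_key.objectid);
	ret = btrfs_ref_tree_mod(fs_info, &generic_ref);

The same struct is what the delayed-ref machinery consumes, so the ref-verify path no longer needs its own argument convention.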
Diffstat (limited to 'fs/btrfs/ref-verify.c')
 fs/btrfs/ref-verify.c | 53 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 30 insertions(+), 23 deletions(-)
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index b283d3a6e837..5cec2c6970f2 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -659,36 +659,43 @@ static void dump_block_entry(struct btrfs_fs_info *fs_info,
/*
* btrfs_ref_tree_mod: called when we modify a ref for a bytenr
- * @root: the root we are making this modification from.
- * @bytenr: the bytenr we are modifying.
- * @num_bytes: number of bytes.
- * @parent: the parent bytenr.
- * @ref_root: the original root owner of the bytenr.
- * @owner: level in the case of metadata, inode in the case of data.
- * @offset: 0 for metadata, file offset for data.
- * @action: the action that we are doing, this is the same as the delayed ref
- * action.
*
* This will add an action item to the given bytenr and do sanity checks to make
* sure we haven't messed something up. If we are making a new allocation and
* this block entry has history we will delete all previous actions as long as
* our sanity checks pass as they are no longer needed.
*/
-int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
- u64 parent, u64 ref_root, u64 owner, u64 offset,
- int action)
+int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ struct btrfs_ref *generic_ref)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct ref_entry *ref = NULL, *exist;
struct ref_action *ra = NULL;
struct block_entry *be = NULL;
struct root_entry *re = NULL;
+ int action = generic_ref->action;
int ret = 0;
- bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
+ bool metadata;
+ u64 bytenr = generic_ref->bytenr;
+ u64 num_bytes = generic_ref->len;
+ u64 parent = generic_ref->parent;
+ u64 ref_root;
+ u64 owner;
+	u64 offset;

-	if (!btrfs_test_opt(root->fs_info, REF_VERIFY))
+	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

+	if (generic_ref->type == BTRFS_REF_METADATA) {
+ ref_root = generic_ref->tree_ref.root;
+ owner = generic_ref->tree_ref.level;
+ offset = 0;
+ } else {
+ ref_root = generic_ref->data_ref.ref_root;
+ owner = generic_ref->data_ref.ino;
+ offset = generic_ref->data_ref.offset;
+ }
+ metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
+
ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
if (!ra || !ref) {
@@ -721,7 +728,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
INIT_LIST_HEAD(&ra->list);
ra->action = action;
- ra->root = root->root_key.objectid;
+	ra->root = generic_ref->real_root;

	/*
* This is an allocation, preallocate the block_entry in case we haven't
@@ -734,7 +741,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
* is and the new root objectid, so let's not treat the passed
* in root as if it really has a ref for this bytenr.
*/
- be = add_block_entry(root->fs_info, bytenr, num_bytes, ref_root);
+ be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
if (IS_ERR(be)) {
kfree(ra);
ret = PTR_ERR(be);
@@ -776,13 +783,13 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
* one we want to lookup below when we modify the
* re->num_refs.
*/
- ref_root = root->root_key.objectid;
- re->root_objectid = root->root_key.objectid;
+ ref_root = generic_ref->real_root;
+ re->root_objectid = generic_ref->real_root;
re->num_refs = 0;
	}

-	spin_lock(&root->fs_info->ref_verify_lock);
- be = lookup_block_entry(&root->fs_info->block_tree, bytenr);
+ spin_lock(&fs_info->ref_verify_lock);
+ be = lookup_block_entry(&fs_info->block_tree, bytenr);
if (!be) {
btrfs_err(fs_info,
"trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
@@ -851,7 +858,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
* didn't think of some other corner case.
*/
btrfs_err(fs_info, "failed to find root %llu for %llu",
- root->root_key.objectid, be->bytenr);
+ generic_ref->real_root, be->bytenr);
dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra);
kfree(ra);
@@ -870,7 +877,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
list_add_tail(&ra->list, &be->actions);
ret = 0;
out_unlock:
- spin_unlock(&root->fs_info->ref_verify_lock);
+ spin_unlock(&fs_info->ref_verify_lock);
out:
if (ret)
btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
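
For reference, the shape of struct btrfs_ref that this patch relies on can be read back from the field accesses above. A sketch reconstructed from the diff alone (the authoritative definition lives in fs/btrfs/delayed-ref.h and carries more documentation):

	/* Reconstructed from the accesses in this patch, not copied verbatim. */
	struct btrfs_tree_ref {
		int level;	/* doubles as "owner" for metadata refs */
		u64 root;
	};

	struct btrfs_data_ref {
		u64 ref_root;
		u64 ino;	/* doubles as "owner" for data refs */
		u64 offset;
	};

	struct btrfs_ref {
		enum btrfs_ref_type type;	/* BTRFS_REF_METADATA or BTRFS_REF_DATA */
		int action;			/* same values as the delayed ref actions */
		u64 bytenr;
		u64 len;
		u64 parent;
		u64 real_root;			/* root this modification is made from */
		union {
			struct btrfs_tree_ref tree_ref;
			struct btrfs_data_ref data_ref;
		};
	};

This is also why the new code computes metadata = owner < BTRFS_FIRST_FREE_OBJECTID only after unpacking the union: for metadata refs "owner" is a tree level, which is always below BTRFS_FIRST_FREE_OBJECTID, while for data refs it is an inode number.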