author    Qu Wenruo <quwenruo@cn.fujitsu.com>  2015-09-08 17:08:37 +0800
committer Chris Mason <clm@fb.com>             2015-10-21 18:37:47 -0700
commit    297d750b9f8d7e6f2dbdf8abc5aa3b5c656affdc (patch)
tree      bd755d89f3c4d7f9c47751772aea2221761d76a2 /fs/btrfs
parent    btrfs: delayed_ref: Add new function to record reserved space into delayed ref (diff)
btrfs: delayed_ref: release and free qgroup reserved at proper timing
Qgroup reserved space needs to be released from the inode dirty map and freed at two different points in time:

1) Release when the metadata is written into the tree
   After the corresponding metadata is written into the tree, any newer write
   will be COWed (the NOCOW case is not handled yet). So we must release the
   range from the inode dirty range map at this point, or we would forget to
   reserve the range needed by newer writes, causing the accounting to exceed
   the limit.

2) Free the reserved bytes when the delayed ref is run
   When delayed refs are run, qgroup accounting follows soon after and turns
   the reserved bytes into rfer/excl numbers. As run_delayed_refs() and qgroup
   accounting both happen at commit_transaction() time, it is safe to free the
   reserved space when the delayed ref is run.

With these release/free timings, the long-standing qgroup reserved space leak
problem should be resolved.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
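For reference, the resulting lifecycle of a data reservation looks roughly like
the sketch below. This is illustrative only and not part of the patch:
btrfs_qgroup_reserve_data() comes from the surrounding patch series and is an
assumption here; the remaining calls appear in the diff that follows.

	/* 1) Buffered write: reserve the range in the inode dirty range map.
	 *    (btrfs_qgroup_reserve_data() is from the wider series, assumed here.)
	 */
	ret = btrfs_qgroup_reserve_data(inode, start, len);

	/* 2) Ordered extent finished and the file extent item is inserted into
	 *    the tree (insert_reserved_file_extent): release the range from the
	 *    dirty map and record the bytes in the delayed ref head instead.
	 */
	btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
	ret = btrfs_add_delayed_qgroup_reserve(root->fs_info, trans,
					       root->objectid, disk_bytenr,
					       ram_bytes);

	/* 3) Delayed ref run at commit time (run_one_delayed_ref): free the
	 *    reserved bytes right before qgroup accounting turns them into
	 *    rfer/excl numbers.
	 */
	btrfs_qgroup_free_delayed_ref(root->fs_info, head->qgroup_ref_root,
				      head->qgroup_reserved);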
Diffstat (limited to 'fs/btrfs')
-rw-r--r--fs/btrfs/extent-tree.c5
-rw-r--r--fs/btrfs/inode.c10
-rw-r--r--fs/btrfs/qgroup.c5
-rw-r--r--fs/btrfs/qgroup.h18
4 files changed, 34 insertions, 4 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2df4bc77f5b4..6c7927cd4f41 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2345,6 +2345,11 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
node->num_bytes);
}
}
+
+ /* Also free its reserved qgroup space */
+ btrfs_qgroup_free_delayed_ref(root->fs_info,
+ head->qgroup_ref_root,
+ head->qgroup_reserved);
return ret;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5ce55f6eefce..c21ed7b14691 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2119,6 +2119,16 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
ret = btrfs_alloc_reserved_file_extent(trans, root,
root->root_key.objectid,
btrfs_ino(inode), file_pos, &ins);
+ if (ret < 0)
+ goto out;
+ /*
+ * Release the reserved range from inode dirty range map, and
+ * move it to delayed ref codes, as now accounting only happens at
+ * commit_transaction() time.
+ */
+ btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
+ ret = btrfs_add_delayed_qgroup_reserve(root->fs_info, trans,
+ root->objectid, disk_bytenr, ram_bytes);
out:
btrfs_free_path(path);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index e05d1f6aa293..75d8e584c3e5 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2116,14 +2116,13 @@ out:
return ret;
}
-void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
+void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
+ u64 ref_root, u64 num_bytes)
{
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct ulist_node *unode;
struct ulist_iterator uiter;
- u64 ref_root = root->root_key.objectid;
int ret = 0;
if (!is_fstree(ref_root))
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 564eb2147740..80924aeceb09 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -72,7 +72,23 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
struct btrfs_qgroup_inherit *inherit);
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes);
-void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes);
+void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
+ u64 ref_root, u64 num_bytes);
+static inline void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
+{
+ return btrfs_qgroup_free_refroot(root->fs_info, root->objectid,
+ num_bytes);
+}
+
+/*
+ * TODO: Add proper trace point for it, as btrfs_qgroup_free() is
+ * called by everywhere, can't provide good trace for delayed ref case.
+ */
+static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
+ u64 ref_root, u64 num_bytes)
+{
+ btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes);
+}
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans);
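For callers, the old interface stays as a thin inline wrapper, and the new
refroot variant is only needed where no btrfs_root is at hand, such as the
delayed ref path, which records only the ref root id. A hypothetical pair of
call sites (placeholder variable names):

	/* Existing callers: free against the root's own objectid, as before. */
	btrfs_qgroup_free(root, num_bytes);

	/* Delayed ref path: only fs_info and the recorded ref root id exist. */
	btrfs_qgroup_free_refroot(fs_info, head->qgroup_ref_root,
				  head->qgroup_reserved);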