author     Nikolay Borisov <nborisov@suse.com>    2020-01-20 16:09:08 +0200
committer  David Sterba <dsterba@suse.com>        2020-03-23 17:01:36 +0100
commit     f603bb94abbed5b5542cfb514ddfa3c5dda96bcc (patch)
tree       ab131720098f0d6ebd7a8deeefa0b293b785bb6e /fs/btrfs/disk-io.c
parent     btrfs: sysfs, unify handler name of devinfo/missing (diff)
btrfs: Perform pinned cleanup directly in btrfs_destroy_delayed_refs
Having btrfs_destroy_delayed_refs call btrfs_pin_extent is problematic for
making pinned extent tracking per-transaction, since a btrfs_trans_handle
cannot be passed to btrfs_pin_extent in this context. Additionally, delayed
ref heads pinned in btrfs_destroy_delayed_refs are going to be handled very
soon afterwards, in btrfs_destroy_pinned_extent.

To enable btrfs_pin_extent to take a btrfs_trans_handle, simply open code it
in btrfs_destroy_delayed_refs and call btrfs_error_unpin_extent_range on the
range. This enables us to do less work in btrfs_destroy_pinned_extent and
leaves btrfs_pin_extent being called only in contexts which have a valid
btrfs_trans_handle.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
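The open-coded pinning in the diff below amounts to a single accounting move:
with the space_info lock taken before the block group lock, head->num_bytes
migrates from the "reserved" counters to the "pinned" counters, after which
the range is immediately handed to btrfs_error_unpin_extent_range. What
follows is a minimal userspace model of that move, not kernel code: the
struct layouts, the pthread mutexes standing in for the kernel spinlocks, and
the pin_reserved_bytes helper are illustrative assumptions, and the percpu
total_bytes_pinned batching is folded into a plain counter.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structs (illustrative only). */
struct space_info {
        pthread_mutex_t lock;
        unsigned long long bytes_reserved;
        unsigned long long bytes_pinned;   /* total_bytes_pinned folded in */
};

struct block_group {
        pthread_mutex_t lock;
        struct space_info *space_info;
        unsigned long long reserved;
        unsigned long long pinned;
};

/*
 * Mirror of the accounting move in the hunk below: take the space_info
 * lock, then the block group lock, and shift num_bytes from the
 * "reserved" counters to the "pinned" counters.
 */
static void pin_reserved_bytes(struct block_group *cache,
                               unsigned long long num_bytes)
{
        pthread_mutex_lock(&cache->space_info->lock);
        pthread_mutex_lock(&cache->lock);
        cache->pinned += num_bytes;
        cache->space_info->bytes_pinned += num_bytes;
        cache->reserved -= num_bytes;
        cache->space_info->bytes_reserved -= num_bytes;
        pthread_mutex_unlock(&cache->lock);
        pthread_mutex_unlock(&cache->space_info->lock);
}

int main(void)
{
        struct space_info si = { .bytes_reserved = 4096 };
        struct block_group bg = { .space_info = &si, .reserved = 4096 };

        pthread_mutex_init(&si.lock, NULL);
        pthread_mutex_init(&bg.lock, NULL);

        pin_reserved_bytes(&bg, 4096);
        printf("reserved=%llu pinned=%llu\n",
               si.bytes_reserved, si.bytes_pinned);
        return 0;
}

Build with "cc -pthread". The kernel path additionally looks up and releases
the block group reference and batches total_bytes_pinned through
percpu_counter_add_batch, both of which are outside this sketch.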
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--  fs/btrfs/disk-io.c | 28 +++++++++++++++++++++++++---
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 49daf6c946df..194c98a61095 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -42,6 +42,7 @@
#include "ref-verify.h"
#include "block-group.h"
#include "discard.h"
+#include "space-info.h"
#define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\
BTRFS_HEADER_FLAG_RELOC |\
@@ -4308,9 +4309,30 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                spin_unlock(&delayed_refs->lock);
                mutex_unlock(&head->mutex);
 
-               if (pin_bytes)
-                       btrfs_pin_extent(fs_info, head->bytenr,
-                                        head->num_bytes, 1);
+               if (pin_bytes) {
+                       struct btrfs_block_group *cache;
+
+                       cache = btrfs_lookup_block_group(fs_info, head->bytenr);
+                       BUG_ON(!cache);
+
+                       spin_lock(&cache->space_info->lock);
+                       spin_lock(&cache->lock);
+                       cache->pinned += head->num_bytes;
+                       btrfs_space_info_update_bytes_pinned(fs_info,
+                               cache->space_info, head->num_bytes);
+                       cache->reserved -= head->num_bytes;
+                       cache->space_info->bytes_reserved -= head->num_bytes;
+                       spin_unlock(&cache->lock);
+                       spin_unlock(&cache->space_info->lock);
+                       percpu_counter_add_batch(
+                               &cache->space_info->total_bytes_pinned,
+                               head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
+
+                       btrfs_put_block_group(cache);
+
+                       btrfs_error_unpin_extent_range(fs_info, head->bytenr,
+                               head->bytenr + head->num_bytes - 1);
+               }
                btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
                btrfs_put_delayed_ref_head(head);
                cond_resched();