author	Naohiro Aota <naohiro.aota@wdc.com>	2022-05-03 17:48:51 -0700
committer	David Sterba <dsterba@suse.com>	2022-05-16 17:17:32 +0200
commit	d70cbdda75da3f258118a558c087157e073229fb (patch)
tree	1a4f3c0d12766438385d35e0e886e1517f20d23f /fs/btrfs/zoned.c
parent	btrfs: zoned: introduce btrfs_zoned_bg_is_full (diff)
btrfs: zoned: consolidate zone finish functions
btrfs_zone_finish() and btrfs_zone_finish_endio() have similar code.
Introduce do_zone_finish() to factor out the common code.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
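The shape of the refactor is a common one: two entry points that shared most
of their teardown logic now funnel into a single helper, with a boolean
selecting the steps that differ. As a rough illustration only (hypothetical
names, plain userspace C, not the kernel code), the pattern looks like:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the shared teardown logic. */
	static int do_finish(int id, bool fully_written)
	{
		if (!fully_written) {
			/* Slow path only: wait for in-flight work first. */
			printf("bg %d: waiting for outstanding writes\n", id);
		}
		/* Common part: deactivate and release the resource. */
		printf("bg %d: finished (fully_written=%d)\n", id, fully_written);
		return 0;
	}

	/* Caller that cannot assume all writes are done. */
	static int finish(int id)
	{
		return do_finish(id, false);
	}

	/* Completion-side caller: the last write just finished. */
	static void finish_endio(int id)
	{
		do_finish(id, true);
	}

	int main(void)
	{
		finish(1);
		finish_endio(2);
		return 0;
	}

In the patch itself, do_zone_finish() takes the fully_written flag:
btrfs_zone_finish() passes false and so keeps the wait-and-set-readonly path,
while btrfs_zone_finish_endio() passes true and skips straight to the common
deactivation code.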
Diffstat (limited to 'fs/btrfs/zoned.c')
-rw-r--r--	fs/btrfs/zoned.c	137
1 file changed, 64 insertions(+), 73 deletions(-)
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index cc0c5dd5a901..eb63b5ec3be8 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1873,20 +1873,14 @@ out_unlock:
return ret;
}
-int btrfs_zone_finish(struct btrfs_block_group *block_group)
+static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct map_lookup *map;
- struct btrfs_device *device;
- u64 physical;
+ bool need_zone_finish;
int ret = 0;
int i;
- if (!btrfs_is_zoned(fs_info))
- return 0;
-
- map = block_group->physical_map;
-
spin_lock(&block_group->lock);
if (!block_group->zone_is_active) {
spin_unlock(&block_group->lock);
@@ -1900,36 +1894,52 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
spin_unlock(&block_group->lock);
return -EAGAIN;
}
- spin_unlock(&block_group->lock);
-
- ret = btrfs_inc_block_group_ro(block_group, false);
- if (ret)
- return ret;
-
- /* Ensure all writes in this block group finish */
- btrfs_wait_block_group_reservations(block_group);
- /* No need to wait for NOCOW writers. Zoned mode does not allow that. */
- btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
- block_group->length);
-
- spin_lock(&block_group->lock);
/*
- * Bail out if someone already deactivated the block group, or
- * allocated space is left in the block group.
+ * If we are sure that the block group is full (= no more room left for
+ * new allocation) and the IO for the last usable block is completed, we
+ * don't need to wait for the other IOs. This holds because we ensure
+ * the sequential IO submissions using the ZONE_APPEND command for data
+ * and block_group->meta_write_pointer for metadata.
*/
- if (!block_group->zone_is_active) {
+ if (!fully_written) {
spin_unlock(&block_group->lock);
- btrfs_dec_block_group_ro(block_group);
- return 0;
- }
- if (block_group->reserved) {
- spin_unlock(&block_group->lock);
- btrfs_dec_block_group_ro(block_group);
- return -EAGAIN;
+ ret = btrfs_inc_block_group_ro(block_group, false);
+ if (ret)
+ return ret;
+
+ /* Ensure all writes in this block group finish */
+ btrfs_wait_block_group_reservations(block_group);
+ /* No need to wait for NOCOW writers. Zoned mode does not allow that */
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
+ block_group->length);
+
+ spin_lock(&block_group->lock);
+
+ /*
+ * Bail out if someone already deactivated the block group, or
+ * allocated space is left in the block group.
+ */
+ if (!block_group->zone_is_active) {
+ spin_unlock(&block_group->lock);
+ btrfs_dec_block_group_ro(block_group);
+ return 0;
+ }
+
+ if (block_group->reserved) {
+ spin_unlock(&block_group->lock);
+ btrfs_dec_block_group_ro(block_group);
+ return -EAGAIN;
+ }
}
+ /*
+ * The block group is not fully allocated, so not fully written yet. We
+ * need to send ZONE_FINISH command to free up an active zone.
+ */
+ need_zone_finish = !btrfs_zoned_bg_is_full(block_group);
+
block_group->zone_is_active = 0;
block_group->alloc_offset = block_group->zone_capacity;
block_group->free_space_ctl->free_space = 0;
@@ -1937,24 +1947,29 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
btrfs_clear_data_reloc_bg(block_group);
spin_unlock(&block_group->lock);
+ map = block_group->physical_map;
for (i = 0; i < map->num_stripes; i++) {
- device = map->stripes[i].dev;
- physical = map->stripes[i].physical;
+ struct btrfs_device *device = map->stripes[i].dev;
+ const u64 physical = map->stripes[i].physical;
if (device->zone_info->max_active_zones == 0)
continue;
- ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
- physical >> SECTOR_SHIFT,
- device->zone_info->zone_size >> SECTOR_SHIFT,
- GFP_NOFS);
+ if (need_zone_finish) {
+ ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
+ physical >> SECTOR_SHIFT,
+ device->zone_info->zone_size >> SECTOR_SHIFT,
+ GFP_NOFS);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
btrfs_dev_clear_active_zone(device, physical);
}
- btrfs_dec_block_group_ro(block_group);
+
+ if (!fully_written)
+ btrfs_dec_block_group_ro(block_group);
spin_lock(&fs_info->zone_active_bgs_lock);
ASSERT(!list_empty(&block_group->active_bg_list));
@@ -1967,6 +1982,14 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
return 0;
}
+int btrfs_zone_finish(struct btrfs_block_group *block_group)
+{
+ if (!btrfs_is_zoned(block_group->fs_info))
+ return 0;
+
+ return do_zone_finish(block_group, false);
+}
+
bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
{
struct btrfs_fs_info *fs_info = fs_devices->fs_info;
@@ -1998,9 +2021,6 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{
struct btrfs_block_group *block_group;
- struct map_lookup *map;
- struct btrfs_device *device;
- u64 physical;
if (!btrfs_is_zoned(fs_info))
return;
@@ -2011,36 +2031,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 len
if (logical + length < block_group->start + block_group->zone_capacity)
goto out;
- spin_lock(&block_group->lock);
-
- if (!block_group->zone_is_active) {
- spin_unlock(&block_group->lock);
- goto out;
- }
-
- block_group->zone_is_active = 0;
- /* We should have consumed all the free space */
- ASSERT(block_group->alloc_offset == block_group->zone_capacity);
- ASSERT(block_group->free_space_ctl->free_space == 0);
- btrfs_clear_treelog_bg(block_group);
- btrfs_clear_data_reloc_bg(block_group);
- spin_unlock(&block_group->lock);
-
- map = block_group->physical_map;
- device = map->stripes[0].dev;
- physical = map->stripes[0].physical;
-
- if (!device->zone_info->max_active_zones)
- goto out;
-
- btrfs_dev_clear_active_zone(device, physical);
-
- spin_lock(&fs_info->zone_active_bgs_lock);
- ASSERT(!list_empty(&block_group->active_bg_list));
- list_del_init(&block_group->active_bg_list);
- spin_unlock(&fs_info->zone_active_bgs_lock);
-
- btrfs_put_block_group(block_group);
+ do_zone_finish(block_group, true);
out:
btrfs_put_block_group(block_group);