author     Naohiro Aota <naohiro.aota@wdc.com>  2021-08-19 21:19:22 +0900
committer  David Sterba <dsterba@suse.com>      2021-10-26 19:07:59 +0200
commit     a85f05e59bc15a83ad910dbcb71df5ad8fa77295 (patch)
tree       e1997a261f6eba923c30cd91a2542cabba42ec34  /fs/btrfs/zoned.c
parent     btrfs: move ffe_ctl one level up (diff)
btrfs: zoned: avoid chunk allocation if active block group has enough space
The current extent allocator tries to allocate a new block group when the existing block groups do not have enough space. On a ZNS device, a new block group means a new active zone. If the number of active zones has already reached the max_active_zones, activating a new zone requires finishing an existing zone, wasting the free space remaining in it.

So, instead, it should reuse the existing active block groups as much as possible when we can't activate any other zones without sacrificing an already activated block group.

While at it, I converted find_free_extent_update_loop() to check the found_extent() case early and made the other conditions simpler.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
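The allocator-side changes in fs/btrfs/extent-tree.c are not part of this diffstat. As a rough, self-contained sketch of the decision this patch introduces (plain userspace C with made-up names, not kernel code): a new chunk/block group should only be requested while at least one device can still activate a zone, mirroring the btrfs_can_activate_zone() helper added in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a zoned device's activation bookkeeping (illustrative only). */
struct zone_dev {
	unsigned int max_active_zones;	/* 0: device reports no limit */
	unsigned int active_zones_left;
};

/* Mirrors the check in btrfs_can_activate_zone(): at least one device
 * must either have no limit or still have active zones to spare. */
static bool can_activate_zone(const struct zone_dev *devs, int ndevs)
{
	for (int i = 0; i < ndevs; i++) {
		if (!devs[i].max_active_zones || devs[i].active_zones_left)
			return true;
	}
	return false;
}

int main(void)
{
	struct zone_dev devs[] = {
		{ .max_active_zones = 14, .active_zones_left = 0 },
		{ .max_active_zones = 14, .active_zones_left = 2 },
	};

	/* One device still has 2 active zones left, so a new block group
	 * (i.e. a new active zone) may be allocated; if both devices were
	 * exhausted, the allocator should keep reusing active block groups. */
	if (can_activate_zone(devs, 2))
		printf("allocate a new block group\n");
	else
		printf("reuse existing active block groups\n");
	return 0;
}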
Diffstat (limited to 'fs/btrfs/zoned.c')
-rw-r--r--  fs/btrfs/zoned.c  31
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 942a34771383..798069484054 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1873,3 +1873,34 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
return ret;
}
+
+bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, int raid_index)
+{
+ struct btrfs_device *device;
+ bool ret = false;
+
+ if (!btrfs_is_zoned(fs_devices->fs_info))
+ return true;
+
+ /* Non-single profiles are not supported yet */
+ if (raid_index != BTRFS_RAID_SINGLE)
+ return false;
+
+ /* Check if there is a device with active zones left */
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ struct btrfs_zoned_device_info *zinfo = device->zone_info;
+
+ if (!device->bdev)
+ continue;
+
+ if (!zinfo->max_active_zones ||
+ atomic_read(&zinfo->active_zones_left)) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+
+ return ret;
+}
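Note on the helper above: a max_active_zones of 0 means the device did not report an active-zone limit, so it never blocks activation; for limited devices the active_zones_left counter must still be non-zero. Devices without an attached bdev are skipped, and device_list_mutex is held while walking fs_devices->devices.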