author     Dennis Zhou <dennis@kernel.org>    2019-02-04 15:20:00 -0500
committer  David Sterba <dsterba@suse.com>    2019-02-25 14:13:31 +0100
commit     10b94a51cafb28d47ee6912248ed698c9ac183be (patch)
tree       e8ebc376e669220ab6eff57ddcf94ef260cf5d69 /fs/btrfs
parent     btrfs: manage heuristic workspace as index 0 (diff)
btrfs: unify compression ops with workspace_manager
Make the workspace_manager own the interface operations rather than
managing index-paired arrays for the workspace_manager and compression
operations.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
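The pattern the patch moves to can be sketched outside the kernel as follows.
This is a minimal, hypothetical illustration of a manager owning its ops
pointer rather than pairing two arrays by index; the toy_ops, toy_manager and
toy_* names are invented for the example and are not the btrfs types.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical sketch, not the btrfs code: each manager owns a pointer
 * to its compression ops instead of relying on a second array that has
 * to stay index-paired with the managers.
 */
struct toy_ops {
	void *(*alloc_workspace)(void);
	void (*free_workspace)(void *ws);
};

struct toy_manager {
	const struct toy_ops *ops;	/* ops owned by the manager itself */
	int total_ws;
};

static void *toy_alloc(void) { return malloc(16); }
static void toy_free(void *ws) { free(ws); }

static const struct toy_ops toy_zlib_ops = {
	.alloc_workspace = toy_alloc,
	.free_workspace  = toy_free,
};

/* The ops table still exists, but is consulted only once, at init. */
static const struct toy_ops *toy_compress_op[] = { &toy_zlib_ops };
static struct toy_manager mgr[1];

int main(void)
{
	for (int i = 0; i < 1; i++) {
		mgr[i].ops = toy_compress_op[i];	/* wire up once */
		mgr[i].total_ws = 0;
	}

	/* Call sites go through the manager, not the parallel array. */
	void *ws = mgr[0].ops->alloc_workspace();
	if (ws) {
		mgr[0].total_ws++;
		printf("workspace ready, total_ws = %d\n", mgr[0].total_ws);
		mgr[0].ops->free_workspace(ws);
		mgr[0].total_ws--;
	}
	return 0;
}

With the ops pointer stored in the manager, call sites only need the manager
(or its index) and no longer have to keep a separate array in sync by index,
which is the same simplification the hunks below make in compression.c.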
Diffstat (limited to 'fs/btrfs')
-rw-r--r--   fs/btrfs/compression.c   11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 7034cf2749e6..3b069f02903b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -775,6 +775,7 @@ const struct btrfs_compress_op btrfs_heuristic_compress = {
};
struct workspace_manager {
+ const struct btrfs_compress_op *ops;
struct list_head idle_ws;
spinlock_t ws_lock;
/* Number of free workspaces */
@@ -801,6 +802,8 @@ void __init btrfs_init_compress(void)
int i;
for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
+ wsm[i].ops = btrfs_compress_op[i];
+
INIT_LIST_HEAD(&wsm[i].idle_ws);
spin_lock_init(&wsm[i].ws_lock);
atomic_set(&wsm[i].total_ws, 0);
@@ -810,7 +813,7 @@ void __init btrfs_init_compress(void)
* Preallocate one workspace for each compression type so
* we can guarantee forward progress in the worst case
*/
- workspace = btrfs_compress_op[i]->alloc_workspace();
+ workspace = wsm[i].ops->alloc_workspace();
if (IS_ERR(workspace)) {
pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
} else {
@@ -873,7 +876,7 @@ again:
* context of btrfs_compress_bio/btrfs_compress_pages
*/
nofs_flag = memalloc_nofs_save();
- workspace = btrfs_compress_op[type]->alloc_workspace();
+ workspace = wsm[type].ops->alloc_workspace();
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(workspace)) {
@@ -931,7 +934,7 @@ static void free_workspace(int type, struct list_head *workspace)
}
spin_unlock(ws_lock);
- btrfs_compress_op[type]->free_workspace(workspace);
+ wsm[type].ops->free_workspace(workspace);
atomic_dec(total_ws);
wake:
cond_wake_up(ws_wait);
@@ -949,7 +952,7 @@ static void free_workspaces(void)
while (!list_empty(&wsm[i].idle_ws)) {
workspace = wsm[i].idle_ws.next;
list_del(workspace);
- btrfs_compress_op[i]->free_workspace(workspace);
+ wsm[i].ops->free_workspace(workspace);
atomic_dec(&wsm[i].total_ws);
}
}