From 1972708a897e99b25cd7d246bd37d44a592c4b54 Mon Sep 17 00:00:00 2001
From: Dennis Zhou
Date: Mon, 4 Feb 2019 15:19:57 -0500
Subject: btrfs: add helpers for compression type and level

It is very easy to miss places that rely on a certain bitshifting for
decoding the type_level overloading. Add helpers to do this instead.

Cc: Omar Sandoval
Reviewed-by: Nikolay Borisov
Reviewed-by: Josef Bacik
Signed-off-by: Dennis Zhou
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/compression.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'fs/btrfs/compression.h')

diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index ddda9b80bf20..004db0b3111b 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -64,6 +64,16 @@ struct compressed_bio {
 	u32 sums;
 };

+static inline unsigned int btrfs_compress_type(unsigned int type_level)
+{
+	return (type_level & 0xF);
+}
+
+static inline unsigned int btrfs_compress_level(unsigned int type_level)
+{
+	return ((type_level & 0xF0) >> 4);
+}
+
 void __init btrfs_init_compress(void);
 void __cold btrfs_exit_compress(void);
--
cgit v1.2.3-59-g8ed1b
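
The encoding these helpers decode is easy to exercise in isolation. The
sketch below is not part of the patch: the two decode helpers are copied
from the hunk above, while make_type_level() is a hypothetical encoder
that mirrors the 0xF/0xF0 masks (type in the low nibble, level in the
high nibble).

#include <assert.h>

static inline unsigned int btrfs_compress_type(unsigned int type_level)
{
	return (type_level & 0xF);
}

static inline unsigned int btrfs_compress_level(unsigned int type_level)
{
	return ((type_level & 0xF0) >> 4);
}

/* Hypothetical encoder mirroring the decode masks above. */
static inline unsigned int make_type_level(unsigned int type,
					   unsigned int level)
{
	return (type & 0xF) | ((level & 0xF) << 4);
}

int main(void)
{
	unsigned int tl = make_type_level(1, 9);	/* e.g. zlib at level 9 */

	assert(btrfs_compress_type(tl) == 1);
	assert(btrfs_compress_level(tl) == 9);
	return 0;
}

From ca4ac360af94964906149efe166453ac83ae7c43 Mon Sep 17 00:00:00 2001
From: Dennis Zhou
Date: Mon, 4 Feb 2019 15:19:59 -0500
Subject: btrfs: manage heuristic workspace as index 0

While the heuristic workspaces aren't really compression workspaces, they
use the same interface for managing them. So rather than branching, let's
just handle them once again as the index 0 compression type.

Reviewed-by: Nikolay Borisov
Reviewed-by: Josef Bacik
Signed-off-by: Dennis Zhou
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/compression.c | 112 +++++++++++++------------------------------------
 fs/btrfs/compression.h |   4 ++
 2 files changed, 34 insertions(+), 82 deletions(-)

(limited to 'fs/btrfs/compression.h')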
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d098df768b67..7034cf2749e6 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -769,6 +769,11 @@ fail:
 	return ERR_PTR(-ENOMEM);
 }

+const struct btrfs_compress_op btrfs_heuristic_compress = {
+	.alloc_workspace = alloc_heuristic_ws,
+	.free_workspace = free_heuristic_ws,
+};
+
 struct workspace_manager {
 	struct list_head idle_ws;
 	spinlock_t ws_lock;
@@ -780,11 +785,11 @@ struct workspace_manager {
 	wait_queue_head_t ws_wait;
 };

-static struct workspace_manager wsm[BTRFS_COMPRESS_TYPES];
-
-static struct workspace_manager btrfs_heuristic_ws;
+static struct workspace_manager wsm[BTRFS_NR_WORKSPACE_MANAGERS];

 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
+	/* The heuristic is represented as compression type 0 */
+	&btrfs_heuristic_compress,
 	&btrfs_zlib_compress,
 	&btrfs_lzo_compress,
 	&btrfs_zstd_compress,
@@ -795,22 +800,7 @@ void __init btrfs_init_compress(void)
 	struct list_head *workspace;
 	int i;

-	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
-	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
-	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
-	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
-
-	workspace = alloc_heuristic_ws();
-	if (IS_ERR(workspace)) {
-		pr_warn(
-	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
-	} else {
-		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
-		btrfs_heuristic_ws.free_ws = 1;
-		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
-	}
-
-	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
 		INIT_LIST_HEAD(&wsm[i].idle_ws);
 		spin_lock_init(&wsm[i].ws_lock);
 		atomic_set(&wsm[i].total_ws, 0);
@@ -837,11 +827,10 @@ void __init btrfs_init_compress(void)
  * Preallocation makes a forward progress guarantee and we do not return
  * errors.
  */
-static struct list_head *__find_workspace(int type, bool heuristic)
+static struct list_head *find_workspace(int type)
 {
 	struct list_head *workspace;
 	int cpus = num_online_cpus();
-	int idx = type - 1;
 	unsigned nofs_flag;
 	struct list_head *idle_ws;
 	spinlock_t *ws_lock;
@@ -849,19 +838,11 @@ static struct list_head *__find_workspace(int type, bool heuristic)
 	wait_queue_head_t *ws_wait;
 	int *free_ws;

-	if (heuristic) {
-		idle_ws  = &btrfs_heuristic_ws.idle_ws;
-		ws_lock  = &btrfs_heuristic_ws.ws_lock;
-		total_ws = &btrfs_heuristic_ws.total_ws;
-		ws_wait  = &btrfs_heuristic_ws.ws_wait;
-		free_ws  = &btrfs_heuristic_ws.free_ws;
-	} else {
-		idle_ws  = &wsm[idx].idle_ws;
-		ws_lock  = &wsm[idx].ws_lock;
-		total_ws = &wsm[idx].total_ws;
-		ws_wait  = &wsm[idx].ws_wait;
-		free_ws  = &wsm[idx].free_ws;
-	}
+	idle_ws  = &wsm[type].idle_ws;
+	ws_lock  = &wsm[type].ws_lock;
+	total_ws = &wsm[type].total_ws;
+	ws_wait  = &wsm[type].ws_wait;
+	free_ws  = &wsm[type].free_ws;

 again:
	spin_lock(ws_lock);
@@ -892,10 +873,7 @@ again:
 	 * context of btrfs_compress_bio/btrfs_compress_pages
 	 */
 	nofs_flag = memalloc_nofs_save();
-	if (heuristic)
-		workspace = alloc_heuristic_ws();
-	else
-		workspace = btrfs_compress_op[idx]->alloc_workspace();
+	workspace = btrfs_compress_op[type]->alloc_workspace();
 	memalloc_nofs_restore(nofs_flag);

 	if (IS_ERR(workspace)) {
@@ -926,38 +904,23 @@ again:
 	return workspace;
 }

-static struct list_head *find_workspace(int type)
-{
-	return __find_workspace(type, false);
-}
-
 /*
  * put a workspace struct back on the list or free it if we have enough
  * idle ones sitting around
  */
-static void __free_workspace(int type, struct list_head *workspace,
-			     bool heuristic)
+static void free_workspace(int type, struct list_head *workspace)
 {
-	int idx = type - 1;
 	struct list_head *idle_ws;
 	spinlock_t *ws_lock;
 	atomic_t *total_ws;
 	wait_queue_head_t *ws_wait;
 	int *free_ws;

-	if (heuristic) {
-		idle_ws  = &btrfs_heuristic_ws.idle_ws;
-		ws_lock  = &btrfs_heuristic_ws.ws_lock;
-		total_ws = &btrfs_heuristic_ws.total_ws;
-		ws_wait  = &btrfs_heuristic_ws.ws_wait;
-		free_ws  = &btrfs_heuristic_ws.free_ws;
-	} else {
-		idle_ws  = &wsm[idx].idle_ws;
-		ws_lock  = &wsm[idx].ws_lock;
-		total_ws = &wsm[idx].total_ws;
-		ws_wait  = &wsm[idx].ws_wait;
-		free_ws  = &wsm[idx].free_ws;
-	}
+	idle_ws  = &wsm[type].idle_ws;
+	ws_lock  = &wsm[type].ws_lock;
+	total_ws = &wsm[type].total_ws;
+	ws_wait  = &wsm[type].ws_wait;
+	free_ws  = &wsm[type].free_ws;

 	spin_lock(ws_lock);
 	if (*free_ws <= num_online_cpus()) {
@@ -968,20 +931,12 @@ static void __free_workspace(int type, struct list_head *workspace,
 	}
 	spin_unlock(ws_lock);

-	if (heuristic)
-		free_heuristic_ws(workspace);
-	else
-		btrfs_compress_op[idx]->free_workspace(workspace);
+	btrfs_compress_op[type]->free_workspace(workspace);
 	atomic_dec(total_ws);
wake:
 	cond_wake_up(ws_wait);
 }

-static void free_workspace(int type, struct list_head *ws)
-{
-	return __free_workspace(type, ws, false);
-}
-
 /*
  * cleanup function for module exit
  */
@@ -990,14 +945,7 @@ static void free_workspaces(void)
 	struct list_head *workspace;
 	int i;

-	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
-		workspace = btrfs_heuristic_ws.idle_ws.next;
-		list_del(workspace);
-		free_heuristic_ws(workspace);
-		atomic_dec(&btrfs_heuristic_ws.total_ws);
-	}
-
-	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
 		while (!list_empty(&wsm[i].idle_ws)) {
 			workspace = wsm[i].idle_ws.next;
 			list_del(workspace);
@@ -1042,8 +990,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,

 	workspace = find_workspace(type);

-	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
-	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
+	btrfs_compress_op[type]->set_level(workspace, type_level);
+	ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
 						      start, pages,
 						      out_pages,
 						      total_in, total_out);
@@ -1072,7 +1020,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
 	int type = cb->compress_type;

 	workspace = find_workspace(type);
-	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
+	ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
 	free_workspace(type, workspace);

 	return ret;
@@ -1091,7 +1039,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,

 	workspace = find_workspace(type);

-	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
+	ret = btrfs_compress_op[type]->decompress(workspace, data_in,
 						  dest_page, start_byte,
 						  srclen, destlen);

@@ -1512,7 +1460,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
  */
 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
 {
-	struct list_head *ws_list = __find_workspace(0, true);
+	struct list_head *ws_list = find_workspace(0);
 	struct heuristic_ws *ws;
 	u32 i;
 	u8 byte;
@@ -1581,7 +1529,7 @@ int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
 	}

 out:
-	__free_workspace(0, ws_list, true);
+	free_workspace(0, ws_list);

 	return ret;
 }
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 004db0b3111b..9a0e73c65b87 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -132,6 +132,10 @@ struct btrfs_compress_op {
 	void (*set_level)(struct list_head *ws, unsigned int type);
 };

+/* The heuristic workspaces are managed via the 0th workspace manager */
+#define BTRFS_NR_WORKSPACE_MANAGERS	(BTRFS_COMPRESS_TYPES + 1)
+
+extern const struct btrfs_compress_op btrfs_heuristic_compress;
 extern const struct btrfs_compress_op btrfs_zlib_compress;
 extern const struct btrfs_compress_op btrfs_lzo_compress;
 extern const struct btrfs_compress_op btrfs_zstd_compress;
--
cgit v1.2.3-59-g8ed1b
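
The effect of the patch above is that the heuristic stops being a special
case: slot 0 of the manager array, previously unused because compression
types start at 1 (BTRFS_COMPRESS_NONE is 0), now holds the heuristic
manager, and every lookup becomes a direct wsm[type] index instead of
wsm[type - 1] plus a branch. A small userspace sketch of that layout
(the enum and names are stand-ins, not the kernel's):

#include <stdio.h>

enum {
	WS_HEURISTIC = 0,	/* compression type 0 doubles as the heuristic */
	WS_ZLIB,
	WS_LZO,
	WS_ZSTD,
	NR_WORKSPACE_MANAGERS,	/* == BTRFS_COMPRESS_TYPES + 1 == 4 */
};

static const char *const manager_name[NR_WORKSPACE_MANAGERS] = {
	"heuristic", "zlib", "lzo", "zstd",
};

int main(void)
{
	int type;

	/* before: wsm[type - 1] plus a special-cased heuristic manager;
	 * after: one array, indexed directly by type */
	for (type = 0; type < NR_WORKSPACE_MANAGERS; type++)
		printf("type %d -> %s manager\n", type, manager_name[type]);
	return 0;
}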
From 92ee55303616a18135be91deff51799a5de81f9a Mon Sep 17 00:00:00 2001
From: Dennis Zhou
Date: Mon, 4 Feb 2019 15:20:03 -0500
Subject: btrfs: move to function pointers for get/put workspaces

The previous patch added generic helpers for get_workspace() and
put_workspace(). Now we can migrate ownership of the workspace_manager
into the compression type code, as the generic compression code doesn't
care beyond being able to get a workspace. The init/cleanup and get/put
methods are abstracted so each compression algorithm can decide how it
wants to manage its workspaces.

Reviewed-by: Josef Bacik
Signed-off-by: Dennis Zhou
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/compression.c | 101 +++++++++++++++++++++++++++----------------------
 fs/btrfs/compression.h |  26 +++++++++++++
 fs/btrfs/lzo.c         |  26 +++++++++++++
 fs/btrfs/zlib.c        |  26 +++++++++++++
 fs/btrfs/zstd.c        |  26 +++++++++++++
 5 files changed, 160 insertions(+), 45 deletions(-)

(limited to 'fs/btrfs/compression.h')

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 0240649fb3ac..7240f8df0ea2 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -730,6 +730,28 @@ struct heuristic_ws {
 	struct list_head list;
 };

+static struct workspace_manager heuristic_wsm;
+
+static void heuristic_init_workspace_manager(void)
+{
+	btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress);
+}
+
+static void heuristic_cleanup_workspace_manager(void)
+{
+	btrfs_cleanup_workspace_manager(&heuristic_wsm);
+}
+
+static struct list_head *heuristic_get_workspace(void)
+{
+	return btrfs_get_workspace(&heuristic_wsm);
+}
+
+static void heuristic_put_workspace(struct list_head *ws)
+{
+	btrfs_put_workspace(&heuristic_wsm, ws);
+}
+
 static void free_heuristic_ws(struct list_head *ws)
 {
 	struct heuristic_ws *workspace;
@@ -770,24 +792,14 @@ fail:
 }

 const struct btrfs_compress_op btrfs_heuristic_compress = {
+	.init_workspace_manager = heuristic_init_workspace_manager,
+	.cleanup_workspace_manager = heuristic_cleanup_workspace_manager,
+	.get_workspace = heuristic_get_workspace,
+	.put_workspace = heuristic_put_workspace,
 	.alloc_workspace = alloc_heuristic_ws,
 	.free_workspace = free_heuristic_ws,
 };

-struct workspace_manager {
-	const struct btrfs_compress_op *ops;
-	struct list_head idle_ws;
-	spinlock_t ws_lock;
-	/* Number of free workspaces */
-	int free_ws;
-	/* Total number of allocated workspaces */
-	atomic_t total_ws;
-	/* Waiters for a free workspace */
-	wait_queue_head_t ws_wait;
-};
-
-static struct workspace_manager wsm[BTRFS_NR_WORKSPACE_MANAGERS];
-
 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 	/* The heuristic is represented as compression type 0 */
 	&btrfs_heuristic_compress,
@@ -796,34 +808,34 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 	&btrfs_zstd_compress,
 };

-static void btrfs_init_workspace_manager(int type)
+void btrfs_init_workspace_manager(struct workspace_manager *wsm,
+				  const struct btrfs_compress_op *ops)
 {
-	struct workspace_manager *wsman = &wsm[type];
 	struct list_head *workspace;

-	wsman->ops = btrfs_compress_op[type];
+	wsm->ops = ops;

-	INIT_LIST_HEAD(&wsman->idle_ws);
-	spin_lock_init(&wsman->ws_lock);
-	atomic_set(&wsman->total_ws, 0);
-	init_waitqueue_head(&wsman->ws_wait);
+	INIT_LIST_HEAD(&wsm->idle_ws);
+	spin_lock_init(&wsm->ws_lock);
+	atomic_set(&wsm->total_ws, 0);
+	init_waitqueue_head(&wsm->ws_wait);

 	/*
 	 * Preallocate one workspace for each compression type so we can
 	 * guarantee forward progress in the worst case
 	 */
-	workspace = wsman->ops->alloc_workspace();
+	workspace = wsm->ops->alloc_workspace();
 	if (IS_ERR(workspace)) {
 		pr_warn(
 	"BTRFS: cannot preallocate compression workspace, will try later\n");
 	} else {
-		atomic_set(&wsman->total_ws, 1);
-		wsman->free_ws = 1;
-		list_add(workspace, &wsman->idle_ws);
+		atomic_set(&wsm->total_ws, 1);
+		wsm->free_ws = 1;
+		list_add(workspace, &wsm->idle_ws);
 	}
 }

-static void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
+void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
 {
 	struct list_head *ws;
@@ -841,7 +853,7 @@ static void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
  * Preallocation makes a forward progress guarantee and we do not return
  * errors.
  */
-static struct list_head *btrfs_get_workspace(struct workspace_manager *wsman)
+struct list_head *btrfs_get_workspace(struct workspace_manager *wsm)
 {
 	struct list_head *workspace;
 	int cpus = num_online_cpus();
@@ -852,11 +864,11 @@ static struct list_head *btrfs_get_workspace(struct workspace_manager *wsman)
 	wait_queue_head_t *ws_wait;
 	int *free_ws;

-	idle_ws  = &wsman->idle_ws;
-	ws_lock  = &wsman->ws_lock;
-	total_ws = &wsman->total_ws;
-	ws_wait  = &wsman->ws_wait;
-	free_ws  = &wsman->free_ws;
+	idle_ws  = &wsm->idle_ws;
+	ws_lock  = &wsm->ws_lock;
+	total_ws = &wsm->total_ws;
+	ws_wait  = &wsm->ws_wait;
+	free_ws  = &wsm->free_ws;

 again:
 	spin_lock(ws_lock);
@@ -887,7 +899,7 @@ again:
 	 * context of btrfs_compress_bio/btrfs_compress_pages
 	 */
 	nofs_flag = memalloc_nofs_save();
-	workspace = wsman->ops->alloc_workspace();
+	workspace = wsm->ops->alloc_workspace();
 	memalloc_nofs_restore(nofs_flag);

 	if (IS_ERR(workspace)) {
@@ -920,15 +932,14 @@ again:

 static struct list_head *get_workspace(int type)
 {
-	return btrfs_get_workspace(&wsm[type]);
+	return btrfs_compress_op[type]->get_workspace();
 }

 /*
  * put a workspace struct back on the list or free it if we have enough
  * idle ones sitting around
  */
-static void btrfs_put_workspace(struct workspace_manager *wsman,
-				struct list_head *ws)
+void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
 {
 	struct list_head *idle_ws;
 	spinlock_t *ws_lock;
@@ -936,11 +947,11 @@ static void btrfs_put_workspace(struct workspace_manager *wsman,
 	wait_queue_head_t *ws_wait;
 	int *free_ws;

-	idle_ws  = &wsman->idle_ws;
-	ws_lock  = &wsman->ws_lock;
-	total_ws = &wsman->total_ws;
-	ws_wait  = &wsman->ws_wait;
-	free_ws  = &wsman->free_ws;
+	idle_ws  = &wsm->idle_ws;
+	ws_lock  = &wsm->ws_lock;
+	total_ws = &wsm->total_ws;
+	ws_wait  = &wsm->ws_wait;
+	free_ws  = &wsm->free_ws;

 	spin_lock(ws_lock);
 	if (*free_ws <= num_online_cpus()) {
@@ -951,7 +962,7 @@ static void btrfs_put_workspace(struct workspace_manager *wsman,
 	}
 	spin_unlock(ws_lock);

-	wsman->ops->free_workspace(ws);
+	wsm->ops->free_workspace(ws);
 	atomic_dec(total_ws);
wake:
 	cond_wake_up(ws_wait);
@@ -959,7 +970,7 @@ wake:

 static void put_workspace(int type, struct list_head *ws)
 {
-	return btrfs_put_workspace(&wsm[type], ws);
+	return btrfs_compress_op[type]->put_workspace(ws);
 }

 /*
@@ -1059,7 +1070,7 @@ void __init btrfs_init_compress(void)
 	int i;

 	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
-		btrfs_init_workspace_manager(i);
+		btrfs_compress_op[i]->init_workspace_manager();
 }

 void __cold btrfs_exit_compress(void)
@@ -1067,7 +1078,7 @@ void __cold btrfs_exit_compress(void)
 	int i;

 	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
-		btrfs_cleanup_workspace_manager(&wsm[i]);
+		btrfs_compress_op[i]->cleanup_workspace_manager();
 }

 /*
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 9a0e73c65b87..e298aa9e6b33 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -107,7 +107,33 @@ enum btrfs_compression_type {
 	BTRFS_COMPRESS_TYPES = 3,
 };

+struct workspace_manager {
+	const struct btrfs_compress_op *ops;
+	struct list_head idle_ws;
+	spinlock_t ws_lock;
+	/* Number of free workspaces */
+	int free_ws;
+	/* Total number of allocated workspaces */
+	atomic_t total_ws;
+	/* Waiters for a free workspace */
+	wait_queue_head_t ws_wait;
+};
+
+void btrfs_init_workspace_manager(struct workspace_manager *wsm,
+				  const struct btrfs_compress_op *ops);
+struct list_head *btrfs_get_workspace(struct workspace_manager *wsm);
+void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws);
+void btrfs_cleanup_workspace_manager(struct workspace_manager *wsm);
+
 struct btrfs_compress_op {
+	void (*init_workspace_manager)(void);
+
+	void (*cleanup_workspace_manager)(void);
+
+	struct list_head *(*get_workspace)(void);
+
+	void (*put_workspace)(struct list_head *ws);
+
 	struct list_head *(*alloc_workspace)(void);

 	void (*free_workspace)(struct list_head *workspace);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 90639140439f..f0837b2c8e94 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -61,6 +61,28 @@ struct workspace {
 	struct list_head list;
 };

+static struct workspace_manager wsm;
+
+static void lzo_init_workspace_manager(void)
+{
+	btrfs_init_workspace_manager(&wsm, &btrfs_lzo_compress);
+}
+
+static void lzo_cleanup_workspace_manager(void)
+{
+	btrfs_cleanup_workspace_manager(&wsm);
+}
+
+static struct list_head *lzo_get_workspace(void)
+{
+	return btrfs_get_workspace(&wsm);
+}
+
+static void lzo_put_workspace(struct list_head *ws)
+{
+	btrfs_put_workspace(&wsm, ws);
+}
+
 static void lzo_free_workspace(struct list_head *ws)
 {
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -490,6 +512,10 @@ static void lzo_set_level(struct list_head *ws, unsigned int type)
 }

 const struct btrfs_compress_op btrfs_lzo_compress = {
+	.init_workspace_manager = lzo_init_workspace_manager,
+	.cleanup_workspace_manager = lzo_cleanup_workspace_manager,
+	.get_workspace = lzo_get_workspace,
+	.put_workspace = lzo_put_workspace,
 	.alloc_workspace = lzo_alloc_workspace,
 	.free_workspace = lzo_free_workspace,
 	.compress_pages = lzo_compress_pages,
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 2bd655c4f8b4..773d1d70ceec 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -27,6 +27,28 @@ struct workspace {
 	int level;
 };

+static struct workspace_manager wsm;
+
+static void zlib_init_workspace_manager(void)
+{
+	btrfs_init_workspace_manager(&wsm, &btrfs_zlib_compress);
+}
+
+static void zlib_cleanup_workspace_manager(void)
+{
+	btrfs_cleanup_workspace_manager(&wsm);
+}
+
+static struct list_head *zlib_get_workspace(void)
+{
+	return btrfs_get_workspace(&wsm);
+}
+
+static void zlib_put_workspace(struct list_head *ws)
+{
+	btrfs_put_workspace(&wsm, ws);
+}
+
 static void zlib_free_workspace(struct list_head *ws)
 {
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -402,6 +424,10 @@ static void zlib_set_level(struct list_head *ws, unsigned int type)
 }

 const struct btrfs_compress_op btrfs_zlib_compress = {
+	.init_workspace_manager = zlib_init_workspace_manager,
+	.cleanup_workspace_manager = zlib_cleanup_workspace_manager,
+	.get_workspace = zlib_get_workspace,
+	.put_workspace = zlib_put_workspace,
 	.alloc_workspace = zlib_alloc_workspace,
 	.free_workspace = zlib_free_workspace,
 	.compress_pages = zlib_compress_pages,
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index af6ec59972f5..b06eaf171be7 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -41,6 +41,28 @@ struct workspace {
 	ZSTD_outBuffer out_buf;
 };

+static struct workspace_manager wsm;
+
+static void zstd_init_workspace_manager(void)
+{
+	btrfs_init_workspace_manager(&wsm, &btrfs_zstd_compress);
+}
+
+static void zstd_cleanup_workspace_manager(void)
+{
+	btrfs_cleanup_workspace_manager(&wsm);
+}
+
+static struct list_head *zstd_get_workspace(void)
+{
+	return btrfs_get_workspace(&wsm);
+}
+
+static void zstd_put_workspace(struct list_head *ws)
+{
+	btrfs_put_workspace(&wsm, ws);
+}
+
 static void zstd_free_workspace(struct list_head *ws)
 {
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -424,6 +446,10 @@ static void zstd_set_level(struct list_head *ws, unsigned int type)
 }

 const struct btrfs_compress_op btrfs_zstd_compress = {
+	.init_workspace_manager = zstd_init_workspace_manager,
+	.cleanup_workspace_manager = zstd_cleanup_workspace_manager,
+	.get_workspace = zstd_get_workspace,
+	.put_workspace = zstd_put_workspace,
 	.alloc_workspace = zstd_alloc_workspace,
 	.free_workspace = zstd_free_workspace,
 	.compress_pages = zstd_compress_pages,
--
cgit v1.2.3-59-g8ed1b
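
The ownership move above is a classic ops-table pattern: the generic code
keeps only the array of btrfs_compress_op pointers and dispatches through
them, while each type owns a file-local manager and forwards back to the
shared btrfs_* helpers. A minimal userspace sketch of that dispatch, with
stand-in names (not the kernel API) and only two of the hooks:

#include <stdio.h>

struct compress_op {
	void (*init_workspace_manager)(void);
	void *(*get_workspace)(void);
};

/* one compression type, owning its manager privately */
static void zlib_init_workspace_manager(void)
{
	printf("zlib: manager initialized\n");
}

static void *zlib_get_workspace(void)
{
	printf("zlib: workspace handed out\n");
	return NULL;
}

static const struct compress_op zlib_ops = {
	.init_workspace_manager = zlib_init_workspace_manager,
	.get_workspace = zlib_get_workspace,
};

/* generic code sees only the ops table, never the managers */
static const struct compress_op *const compress_ops[] = {
	&zlib_ops,
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(compress_ops) / sizeof(compress_ops[0]); i++) {
		compress_ops[i]->init_workspace_manager();
		compress_ops[i]->get_workspace();
	}
	return 0;
}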
From 7bf4994304e27454c5cf99de1d43033cb29b34fd Mon Sep 17 00:00:00 2001
From: Dennis Zhou
Date: Mon, 4 Feb 2019 15:20:04 -0500
Subject: btrfs: plumb level through the compression interface

Zlib compression supports multiple levels, but doesn't require changes in
how a workspace itself is created and managed. Zstd introduces a
different memory requirement such that higher levels of compression
require more memory. This requires changes in how the alloc()/get()
methods work for zstd. This patch plumbs compression level through the
interface as a parameter in preparation for zstd compression levels.
This gives the compression types the opportunity to create/manage
workspaces based on the compression level.

Reviewed-by: Nikolay Borisov
Reviewed-by: Josef Bacik
Signed-off-by: Dennis Zhou
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/compression.c | 31 ++++++++++++++++---------------
 fs/btrfs/compression.h |  7 ++++---
 fs/btrfs/lzo.c         |  6 +++---
 fs/btrfs/zlib.c        |  7 ++++---
 fs/btrfs/zstd.c        |  6 +++---
 5 files changed, 30 insertions(+), 27 deletions(-)

(limited to 'fs/btrfs/compression.h')

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 7240f8df0ea2..ccd6bb2061f6 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -742,9 +742,9 @@ static void heuristic_cleanup_workspace_manager(void)
 	btrfs_cleanup_workspace_manager(&heuristic_wsm);
 }

-static struct list_head *heuristic_get_workspace(void)
+static struct list_head *heuristic_get_workspace(unsigned int level)
 {
-	return btrfs_get_workspace(&heuristic_wsm);
+	return btrfs_get_workspace(&heuristic_wsm, level);
 }

 static void heuristic_put_workspace(struct list_head *ws)
@@ -764,7 +764,7 @@ static void free_heuristic_ws(struct list_head *ws)
 	kfree(workspace);
 }

-static struct list_head *alloc_heuristic_ws(void)
+static struct list_head *alloc_heuristic_ws(unsigned int level)
 {
 	struct heuristic_ws *ws;

@@ -824,7 +824,7 @@ void btrfs_init_workspace_manager(struct workspace_manager *wsm,
 	 * Preallocate one workspace for each compression type so we can
 	 * guarantee forward progress in the worst case
 	 */
-	workspace = wsm->ops->alloc_workspace();
+	workspace = wsm->ops->alloc_workspace(0);
 	if (IS_ERR(workspace)) {
 		pr_warn(
 	"BTRFS: cannot preallocate compression workspace, will try later\n");
@@ -853,7 +853,8 @@ void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
  * Preallocation makes a forward progress guarantee and we do not return
  * errors.
  */
-struct list_head *btrfs_get_workspace(struct workspace_manager *wsm)
+struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
+				      unsigned int level)
 {
 	struct list_head *workspace;
 	int cpus = num_online_cpus();
@@ -899,7 +900,7 @@ again:
 	 * context of btrfs_compress_bio/btrfs_compress_pages
 	 */
 	nofs_flag = memalloc_nofs_save();
-	workspace = wsm->ops->alloc_workspace();
+	workspace = wsm->ops->alloc_workspace(level);
 	memalloc_nofs_restore(nofs_flag);

 	if (IS_ERR(workspace)) {
@@ -930,9 +931,9 @@ again:
 	return workspace;
 }

-static struct list_head *get_workspace(int type)
+static struct list_head *get_workspace(int type, int level)
 {
-	return btrfs_compress_op[type]->get_workspace();
+	return btrfs_compress_op[type]->get_workspace(level);
 }

 /*
@@ -1003,12 +1004,13 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
 	unsigned long *total_out)
 {
 	int type = btrfs_compress_type(type_level);
+	int level = btrfs_compress_level(type_level);
 	struct list_head *workspace;
 	int ret;

-	workspace = get_workspace(type);
+	workspace = get_workspace(type, level);

-	btrfs_compress_op[type]->set_level(workspace, type_level);
+	btrfs_compress_op[type]->set_level(workspace, level);
 	ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
 						      start, pages,
 						      out_pages,
@@ -1037,7 +1039,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
 	int ret;
 	int type = cb->compress_type;

-	workspace = get_workspace(type);
+	workspace = get_workspace(type, 0);
 	ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
 	put_workspace(type, workspace);

@@ -1055,13 +1057,12 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
 	struct list_head *workspace;
 	int ret;

-	workspace = get_workspace(type);
-
+	workspace = get_workspace(type, 0);
 	ret = btrfs_compress_op[type]->decompress(workspace, data_in,
 						  dest_page, start_byte,
 						  srclen, destlen);
-
 	put_workspace(type, workspace);
+
 	return ret;
 }

@@ -1489,7 +1490,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
  */
 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
 {
-	struct list_head *ws_list = get_workspace(0);
+	struct list_head *ws_list = get_workspace(0, 0);
 	struct heuristic_ws *ws;
 	u32 i;
 	u8 byte;
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index e298aa9e6b33..2ab8b2f29d88 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -121,7 +121,8 @@ struct workspace_manager {

 void btrfs_init_workspace_manager(struct workspace_manager *wsm,
 				  const struct btrfs_compress_op *ops);
-struct list_head *btrfs_get_workspace(struct workspace_manager *wsm);
+struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
+				      unsigned int level);
 void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws);
 void btrfs_cleanup_workspace_manager(struct workspace_manager *wsm);

@@ -130,11 +131,11 @@ struct btrfs_compress_op {

 	void (*cleanup_workspace_manager)(void);

-	struct list_head *(*get_workspace)(void);
+	struct list_head *(*get_workspace)(unsigned int level);

 	void (*put_workspace)(struct list_head *ws);

-	struct list_head *(*alloc_workspace)(void);
+	struct list_head *(*alloc_workspace)(unsigned int level);

 	void (*free_workspace)(struct list_head *workspace);

diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index f0837b2c8e94..f132af45a924 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -73,9 +73,9 @@ static void lzo_cleanup_workspace_manager(void)
 	btrfs_cleanup_workspace_manager(&wsm);
 }

-static struct list_head *lzo_get_workspace(void)
+static struct list_head *lzo_get_workspace(unsigned int level)
 {
-	return btrfs_get_workspace(&wsm);
+	return btrfs_get_workspace(&wsm, level);
 }

 static void lzo_put_workspace(struct list_head *ws)
@@ -93,7 +93,7 @@ static void lzo_free_workspace(struct list_head *ws)
 	kfree(workspace);
 }

-static struct list_head *lzo_alloc_workspace(void)
+static struct list_head *lzo_alloc_workspace(unsigned int level)
 {
 	struct workspace *workspace;

diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 773d1d70ceec..fc883a14ecbf 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -39,9 +39,9 @@ static void zlib_cleanup_workspace_manager(void)
 	btrfs_cleanup_workspace_manager(&wsm);
 }

-static struct list_head *zlib_get_workspace(void)
+static struct list_head *zlib_get_workspace(unsigned int level)
 {
-	return btrfs_get_workspace(&wsm);
+	return btrfs_get_workspace(&wsm, level);
 }

 static void zlib_put_workspace(struct list_head *ws)
@@ -58,7 +58,7 @@ static void zlib_free_workspace(struct list_head *ws)
 	kfree(workspace);
 }

-static struct list_head *zlib_alloc_workspace(void)
+static struct list_head *zlib_alloc_workspace(unsigned int level)
 {
 	struct workspace *workspace;
 	int workspacesize;
@@ -70,6 +70,7 @@ static struct list_head *zlib_alloc_workspace(void)
 	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
 			zlib_inflate_workspacesize());
 	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
+	workspace->level = level;
 	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!workspace->strm.workspace || !workspace->buf)
 		goto fail;
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index b06eaf171be7..404101864220 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -53,9 +53,9 @@ static void zstd_cleanup_workspace_manager(void)
 	btrfs_cleanup_workspace_manager(&wsm);
 }

-static struct list_head *zstd_get_workspace(void)
+static struct list_head *zstd_get_workspace(unsigned int level)
 {
-	return btrfs_get_workspace(&wsm);
+	return btrfs_get_workspace(&wsm, level);
 }

 static void zstd_put_workspace(struct list_head *ws)
@@ -72,7 +72,7 @@ static void zstd_free_workspace(struct list_head *ws)
 	kfree(workspace);
 }

-static struct list_head *zstd_alloc_workspace(void)
+static struct list_head *zstd_alloc_workspace(unsigned int level)
 {
 	ZSTD_parameters params =
 			zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT);
--
cgit v1.2.3-59-g8ed1b
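
The plumbing above threads one value through three layers:
btrfs_compress_pages() decodes the level from type_level,
get_workspace() forwards it through the per-type ops, and
alloc_workspace() finally sees it when a new workspace must be created.
A condensed userspace sketch of that chain, with simplified signatures
and stand-in names (the nibble decode matches the helpers from the first
patch in this series):

#include <stdio.h>

static void *alloc_workspace(unsigned int level)
{
	printf("allocating a workspace sized for level %u\n", level);
	return NULL;
}

static void *get_workspace(int type, unsigned int level)
{
	/* the kernel dispatches through btrfs_compress_op[type] here */
	(void)type;
	return alloc_workspace(level);
}

static void compress_pages(unsigned int type_level)
{
	int type = type_level & 0xF;			/* btrfs_compress_type() */
	unsigned int level = (type_level & 0xF0) >> 4;	/* btrfs_compress_level() */

	get_workspace(type, level);
}

int main(void)
{
	compress_pages(1 | (9 << 4));	/* e.g. zlib at level 9 */
	return 0;
}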
From d0ab62ce2ded36294f3a02156415b8157d660b95 Mon Sep 17 00:00:00 2001
From: Dennis Zhou
Date: Mon, 4 Feb 2019 15:20:05 -0500
Subject: btrfs: change set_level() to bound the level passed in

Currently, the only user of set_level() is zlib, which sets an internal
workspace parameter. As level is now plumbed into get_workspace(), this
can be handled there rather than separately.

This repurposes set_level() to bound the level passed in so it can be
used when setting the mount's compression level as well as verifying the
level before getting a workspace. The other benefit is this divides the
meaning of compress(0) and get_workspace(0). The former means we want to
use the default compression level of the compression type. The latter
means we can use any workspace available.

Signed-off-by: Dennis Zhou
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/compression.c | 25 +++++++++++++++++--------
 fs/btrfs/compression.h |  9 +++++++--
 fs/btrfs/lzo.c         |  3 ++-
 fs/btrfs/super.c       |  4 +++-
 fs/btrfs/zlib.c        | 18 ++++++++++--------
 fs/btrfs/zstd.c        |  3 ++-
 6 files changed, 41 insertions(+), 21 deletions(-)

(limited to 'fs/btrfs/compression.h')

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ccd6bb2061f6..eb8e20b740d6 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1009,8 +1009,6 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
 	int ret;

 	workspace = get_workspace(type, level);
-
-	btrfs_compress_op[type]->set_level(workspace, level);
 	ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
 						      start, pages,
 						      out_pages,
@@ -1563,14 +1561,25 @@ out:
 	return ret;
 }

-unsigned int btrfs_compress_str2level(const char *str)
+/*
+ * Convert the compression suffix (eg. after "zlib" starting with ":") to
+ * level, unrecognized string will set the default level
+ */
+unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
 {
-	if (strncmp(str, "zlib", 4) != 0)
+	unsigned int level = 0;
+	int ret;
+
+	if (!type)
 		return 0;

-	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
-	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
-		return str[5] - '0';
+	if (str[0] == ':') {
+		ret = kstrtouint(str + 1, 10, &level);
+		if (ret)
+			level = 0;
+	}
+
+	level = btrfs_compress_op[type]->set_level(level);

-	return BTRFS_ZLIB_DEFAULT_LEVEL;
+	return level;
 }
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 2ab8b2f29d88..9976fe0f7526 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -97,7 +97,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 					  int mirror_num, unsigned long bio_flags);

-unsigned btrfs_compress_str2level(const char *str);
+unsigned int btrfs_compress_str2level(unsigned int type, const char *str);

 enum btrfs_compression_type {
 	BTRFS_COMPRESS_NONE = 0,
@@ -156,7 +156,12 @@ struct btrfs_compress_op {
 			  unsigned long start_byte,
 			  size_t srclen, size_t destlen);

-	void (*set_level)(struct list_head *ws, unsigned int type);
+	/*
+	 * This bounds the level set by the user to be within range of a
+	 * particular compression type. It returns the level that will be used
+	 * if the level is out of bounds or the default if 0 is passed in.
+	 */
+	unsigned int (*set_level)(unsigned int level);
 };

 /* The heuristic workspaces are managed via the 0th workspace manager */
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index f132af45a924..579d53ae256f 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -507,8 +507,9 @@ out:
 	return ret;
 }

-static void lzo_set_level(struct list_head *ws, unsigned int type)
+static unsigned int lzo_set_level(unsigned int level)
 {
+	return 0;
 }

 const struct btrfs_compress_op btrfs_lzo_compress = {
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f9d13a30aa8a..9ac94c9d597d 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -529,7 +529,9 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 			if (token != Opt_compress &&
 			    token != Opt_compress_force)
 				info->compress_level =
-					btrfs_compress_str2level(args[0].from);
+					btrfs_compress_str2level(
+							BTRFS_COMPRESS_ZLIB,
+							args[0].from + 4);
 			btrfs_set_opt(info->mount_opt, COMPRESS);
 			btrfs_clear_opt(info->mount_opt, NODATACOW);
 			btrfs_clear_opt(info->mount_opt, NODATASUM);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index fc883a14ecbf..b86b7ad6b900 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -41,7 +41,12 @@ static void zlib_cleanup_workspace_manager(void)

 static struct list_head *zlib_get_workspace(unsigned int level)
 {
-	return btrfs_get_workspace(&wsm, level);
+	struct list_head *ws = btrfs_get_workspace(&wsm, level);
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
+
+	workspace->level = level;
+
+	return ws;
 }

 static void zlib_put_workspace(struct list_head *ws)
@@ -413,15 +418,12 @@ next:
 	return ret;
 }

-static void zlib_set_level(struct list_head *ws, unsigned int type)
+static unsigned int zlib_set_level(unsigned int level)
 {
-	struct workspace *workspace = list_entry(ws, struct workspace, list);
-	unsigned int level = btrfs_compress_level(type);
-
-	if (level > 9)
-		level = 9;
+	if (!level)
+		return BTRFS_ZLIB_DEFAULT_LEVEL;

-	workspace->level = level > 0 ? level : 3;
+	return min_t(unsigned int, level, 9);
 }

 const struct btrfs_compress_op btrfs_zlib_compress = {
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 404101864220..43f3be755b8c 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -441,8 +441,9 @@ finish:
 	return ret;
 }

-static void zstd_set_level(struct list_head *ws, unsigned int type)
+static unsigned int zstd_set_level(unsigned int level)
 {
+	return ZSTD_BTRFS_DEFAULT_LEVEL;
 }

 const struct btrfs_compress_op btrfs_zstd_compress = {
--
cgit v1.2.3-59-g8ed1b
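
With this change, set_level() is a pure clamping function: 0 maps to the
type's default and anything else is bounded to the supported range. A
standalone sketch of the zlib variant from the hunk above; the default of
3 matches the old "level : 3" fallback removed by this patch, and min_t
is written out as a plain comparison:

#include <stdio.h>

#define BTRFS_ZLIB_DEFAULT_LEVEL 3	/* matches the old "level : 3" fallback */

static unsigned int zlib_set_level(unsigned int level)
{
	if (!level)
		return BTRFS_ZLIB_DEFAULT_LEVEL;

	return level < 9 ? level : 9;	/* min_t(unsigned int, level, 9) */
}

int main(void)
{
	printf("zlib:0  -> %u (default)\n", zlib_set_level(0));
	printf("zlib:7  -> %u (in range)\n", zlib_set_level(7));
	printf("zlib:42 -> %u (clamped)\n", zlib_set_level(42));
	return 0;
}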