author     Nikolay Borisov <nborisov@suse.com>    2019-03-12 17:20:27 +0200
committer  David Sterba <dsterba@suse.com>        2019-05-02 13:48:19 +0200
commit     1368c6dac7f10a18195fa4ebf072799a727fd4a6 (patch)
tree       471b2df3c3ea9741442cd82c8d5fdb38ec4a6afd /fs/btrfs/inode.c
parent     btrfs: Remove fs_info from struct async_chunk (diff)
btrfs: Make compress_file_range take only struct async_chunk
All the context this function needs is held within struct async_chunk. Currently we pass not only the struct but also every individual member. This is redundant; simplify it by passing only struct async_chunk and leaving it to compress_file_range to extract the values it requires. No functional changes.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
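For reference, a minimal sketch of the struct async_chunk context as compress_file_range() now consumes it. Only the members actually read in the hunks below (inode, locked_page, start, end, and work via container_of() in async_cow_start()) are shown; their order, comments and any omitted fields are assumptions, not the real definition in fs/btrfs/inode.c:

/* Sketch only: the members of async_chunk referenced by this patch. */
struct async_chunk {
	struct inode *inode;		/* inode whose range is being compressed */
	struct page *locked_page;	/* page the caller handed in locked */
	u64 start;			/* first byte of the delalloc range */
	u64 end;			/* last byte of the delalloc range */
	struct btrfs_work work;		/* async_cow_start() recovers the chunk via container_of() */
	/* further members of the real struct omitted here */
};

With everything reachable from one pointer, the worker only needs the struct itself, which is what the third hunk below reduces the call site to.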
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--   fs/btrfs/inode.c   20
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b594a7468716..f83c8edd1703 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -439,14 +439,14 @@ static inline void inode_should_defrag(struct btrfs_inode *inode,
* are written in the same order that the flusher thread sent them
* down.
*/
-static noinline void compress_file_range(struct inode *inode,
- struct page *locked_page,
- u64 start, u64 end,
- struct async_chunk *async_chunk,
- int *num_added)
+static noinline void compress_file_range(struct async_chunk *async_chunk,
+ int *num_added)
{
+ struct inode *inode = async_chunk->inode;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 blocksize = fs_info->sectorsize;
+ u64 start = async_chunk->start;
+ u64 end = async_chunk->end;
u64 actual_end;
int ret = 0;
struct page **pages = NULL;
@@ -665,9 +665,9 @@ cleanup_and_bail_uncompressed:
* to our extent and set things up for the async work queue to run
* cow_file_range to do the normal delalloc dance.
*/
- if (page_offset(locked_page) >= start &&
- page_offset(locked_page) <= end)
- __set_page_dirty_nobuffers(locked_page);
+ if (page_offset(async_chunk->locked_page) >= start &&
+ page_offset(async_chunk->locked_page) <= end)
+ __set_page_dirty_nobuffers(async_chunk->locked_page);
/* unlocked later on in the async handlers */
if (redirty)
@@ -1132,9 +1132,7 @@ static noinline void async_cow_start(struct btrfs_work *work)
async_chunk = container_of(work, struct async_chunk, work);
- compress_file_range(async_chunk->inode, async_chunk->locked_page,
- async_chunk->start, async_chunk->end, async_chunk,
- &num_added);
+ compress_file_range(async_chunk, &num_added);
if (num_added == 0) {
btrfs_add_delayed_iput(async_chunk->inode);
async_chunk->inode = NULL;