author     Ira Weiny <ira.weiny@intel.com>       2021-02-16 18:48:23 -0800
committer  David Sterba <dsterba@suse.com>       2021-04-19 17:25:16 +0200
commit     58c1a35cd52268ae1524b3ff5eac9fa2414e6efc (patch)
tree       c7b88700119a7542dba4feba9be3d7475ba48f86 /fs/btrfs
parent     btrfs: remove duplicated in_range() macro (diff)
btrfs: convert kmap to kmap_local_page, simple cases
Use a simple coccinelle script to help convert the most common
kmap()/kunmap() patterns to kmap_local_page()/kunmap_local().

Note that some kmaps which were caught by this script needed to be
handled by hand because of the strict unmapping order of kunmap_local(),
so they are not included in this patch. But this script got us started.

There's another temp variable added for the final length write to the
first page so it does not interfere with cpage_out, which is used for
mapping other pages.

The development of this patch was aided by the following script:

// <smpl>
// SPDX-License-Identifier: GPL-2.0-only
// Find kmap and replace with kmap_local_page then mark kunmap
//
// Confidence: Low
// Copyright: (C) 2021 Intel Corporation
// URL: http://coccinelle.lip6.fr/

@ catch_all @
expression e, e2;
@@

(
-kmap(e)
+kmap_local_page(e)
)
...
(
-kunmap(...)
+kunmap_local()
)

// </smpl>

Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
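For reference, the conversion pattern the script produces looks roughly like the sketch below. This is an illustration only, not code from the patch; the function copy_one_page() and its arguments are hypothetical. The key difference visible in the hunks that follow is that kunmap_local() takes the address returned by kmap_local_page() rather than the struct page.

/*
 * Illustrative sketch of the kmap() -> kmap_local_page() conversion.
 * Not part of this patch; copy_one_page(), src and len are hypothetical.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_one_page(struct page *page, const char *src, size_t len)
{
        char *buf;

        /* Before: buf = kmap(page); ... kunmap(page); */
        buf = kmap_local_page(page);    /* CPU-local, non-sleeping mapping */
        memcpy(buf, src, len);
        kunmap_local(buf);              /* pass the mapped address, not the page */
}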
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/compression.c  4
-rw-r--r--  fs/btrfs/inode.c        4
-rw-r--r--  fs/btrfs/lzo.c          9
-rw-r--r--  fs/btrfs/raid56.c       4
4 files changed, 10 insertions, 11 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 3f4c832abfed..2600703fab83 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1611,7 +1611,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
curr_sample_pos = 0;
while (index < index_end) {
page = find_get_page(inode->i_mapping, index);
- in_data = kmap(page);
+ in_data = kmap_local_page(page);
/* Handle case where the start is not aligned to PAGE_SIZE */
i = start % PAGE_SIZE;
while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
@@ -1624,7 +1624,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
start += SAMPLING_INTERVAL;
curr_sample_pos += SAMPLING_READ_SIZE;
}
- kunmap(page);
+ kunmap_local(in_data);
put_page(page);
index++;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0e007da64a36..8f21036bcc99 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7019,7 +7019,7 @@ next:
if (ret)
goto out;
} else {
- map = kmap(page);
+ map = kmap_local_page(page);
read_extent_buffer(leaf, map + pg_offset, ptr,
copy_size);
if (pg_offset + copy_size < PAGE_SIZE) {
@@ -7027,7 +7027,7 @@ next:
PAGE_SIZE - pg_offset -
copy_size);
}
- kunmap(page);
+ kunmap_local(map);
}
flush_dcache_page(page);
}
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 9084a950dc09..cd042c7567a4 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -118,7 +118,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0;
char *data_in;
- char *cpage_out;
+ char *cpage_out, *sizes_ptr;
int nr_pages = 0;
struct page *in_page = NULL;
struct page *out_page = NULL;
@@ -258,10 +258,9 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
}
/* store the size of all chunks of compressed data */
- cpage_out = kmap(pages[0]);
- write_compress_length(cpage_out, tot_out);
-
- kunmap(pages[0]);
+ sizes_ptr = kmap_local_page(pages[0]);
+ write_compress_length(sizes_ptr, tot_out);
+ kunmap_local(sizes_ptr);
ret = 0;
*total_out = tot_out;
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0a2858609bb7..633b4b1b1ee1 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2388,13 +2388,13 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
/* Check scrubbing parity and repair it */
p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
- parity = kmap(p);
+ parity = kmap_local_page(p);
if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
copy_page(parity, pointers[rbio->scrubp]);
else
/* Parity is right, needn't writeback */
bitmap_clear(rbio->dbitmap, pagenr, 1);
- kunmap(p);
+ kunmap_local(parity);
for (stripe = 0; stripe < nr_data; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
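The kunmap(page_in_rbio(...)) calls left untouched at the end of the raid56.c hunk are the kind of case the commit message says had to be handled by hand: kmap_local_page() mappings must be released in reverse (last-in, first-out) order, so code that maps several pages and unmaps them in an arbitrary order cannot be converted mechanically. A minimal sketch of that ordering rule, with hypothetical names and not code from this patch:

/* Hypothetical example of the LIFO rule for local page mappings. */
#include <linux/highmem.h>

static void map_two_pages(struct page *a, struct page *b)
{
        char *va = kmap_local_page(a);
        char *vb = kmap_local_page(b);

        /* ... use va and vb ... */

        /* Local mappings must be unmapped in reverse order of mapping. */
        kunmap_local(vb);
        kunmap_local(va);
}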