path: root/fs/btrfs
author	David Sterba <dsterba@suse.com>	2020-04-29 16:04:44 +0200
committer	David Sterba <dsterba@suse.com>	2020-05-25 11:25:32 +0200
commit	1441ed9b7a0f53f47afd0b5ccdcb447e50559165 (patch)
tree	065cbe47769cdc9858ce9ef171dd03e1f25212ad /fs/btrfs
parent	btrfs: add separate bounds checker for set/get helpers (diff)
btrfs: speed up btrfs_get_##bits helpers
The helpers unconditionally call map_private_extent_buffer to get the address of the page containing the requested offset, plus the mapping start and length. Depending on the return value, the fast path uses an unaligned read to get data within a page, or falls back to read_extent_buffer, which can handle reads spanning multiple pages.

This is all wasteful. We know the number of bytes to read (1/2/4/8) and can find the page directly. Then we simply check whether the value is contained in that page, or whether the fallback is needed. This saves one function call to map_private_extent_buffer and several unnecessary temporary variables.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
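To illustrate the idea behind the fast path, here is a minimal userspace sketch, not the kernel code: it assumes a 4 KiB page size and a toy extent-buffer type whose "pages" are plain char arrays, and all demo_* names are hypothetical. The real helpers are generated by macros in fs/btrfs/struct-funcs.c and additionally convert from on-disk little-endian (get_unaligned_le##bits / le##bits##_to_cpu), which the sketch only notes in a comment.

/*
 * Simplified sketch of the single-page fast path vs. the cross-page
 * fallback. Assumes 4 KiB pages; demo_eb and demo_get_u32 are made up
 * for illustration only.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE  4096UL
#define DEMO_PAGE_SHIFT 12

struct demo_eb {
	char *pages[4];		/* backing pages of the metadata buffer */
};

/* Fallback: copy a value that may straddle a page boundary. */
static void demo_read_bytes(const struct demo_eb *eb, void *dst,
			    unsigned long offset, int size)
{
	char *out = dst;

	while (size > 0) {
		unsigned long idx = offset >> DEMO_PAGE_SHIFT;
		unsigned long oip = offset & (DEMO_PAGE_SIZE - 1);
		int cur = size;

		if (oip + cur > DEMO_PAGE_SIZE)
			cur = DEMO_PAGE_SIZE - oip;
		memcpy(out, eb->pages[idx] + oip, cur);
		out += cur;
		offset += cur;
		size -= cur;
	}
}

static uint32_t demo_get_u32(const struct demo_eb *eb,
			     unsigned long member_offset)
{
	const unsigned long oip = member_offset & (DEMO_PAGE_SIZE - 1);
	const int size = sizeof(uint32_t);
	uint32_t res;

	if (oip + size <= DEMO_PAGE_SIZE) {
		/* Fast path: the value lives entirely inside one page. */
		const unsigned long idx = member_offset >> DEMO_PAGE_SHIFT;

		memcpy(&res, eb->pages[idx] + oip, size);
		return res;	/* kernel code converts from LE here */
	}
	/* Slow path: the value crosses a page boundary. */
	demo_read_bytes(eb, &res, member_offset, size);
	return res;
}

int main(void)
{
	static char p0[DEMO_PAGE_SIZE], p1[DEMO_PAGE_SIZE];
	struct demo_eb eb = { .pages = { p0, p1 } };
	uint32_t val = 0x11223344;

	/* One value inside page 0, one straddling pages 0 and 1. */
	memcpy(p0 + 100, &val, sizeof(val));
	memcpy(p0 + DEMO_PAGE_SIZE - 2, &val, 2);
	memcpy(p1, (char *)&val + 2, 2);

	printf("fast path: 0x%x\n", (unsigned)demo_get_u32(&eb, 100));
	printf("slow path: 0x%x\n",
	       (unsigned)demo_get_u32(&eb, DEMO_PAGE_SIZE - 2));
	return 0;
}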
Diffstat (limited to 'fs/btrfs')
 fs/btrfs/struct-funcs.c | 29 ++++++++++-------------------
 1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index 68c02997e60d..e6d2bd019444 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -99,28 +99,19 @@ u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
const void *ptr, unsigned long off) \
{ \
- unsigned long part_offset = (unsigned long)ptr; \
- unsigned long offset = part_offset + off; \
- void *p; \
- int err; \
- char *kaddr; \
- unsigned long map_start; \
- unsigned long map_len; \
- int size = sizeof(u##bits); \
- u##bits res; \
+ const unsigned long member_offset = (unsigned long)ptr + off; \
+ const unsigned long oip = offset_in_page(member_offset); \
+ const int size = sizeof(u##bits); \
+ __le##bits leres; \
\
ASSERT(check_setget_bounds(eb, ptr, off, size)); \
- err = map_private_extent_buffer(eb, offset, size, \
- &kaddr, &map_start, &map_len); \
- if (err) { \
- __le##bits leres; \
- \
- read_extent_buffer(eb, &leres, offset, size); \
- return le##bits##_to_cpu(leres); \
+ if (oip + size <= PAGE_SIZE) { \
+ const unsigned long idx = member_offset >> PAGE_SHIFT; \
+ const char *kaddr = page_address(eb->pages[idx]); \
+ return get_unaligned_le##bits(kaddr + oip); \
} \
- p = kaddr + part_offset - map_start; \
- res = get_unaligned_le##bits(p + off); \
- return res; \
+ read_extent_buffer(eb, &leres, member_offset, size); \
+ return le##bits##_to_cpu(leres); \
} \
void btrfs_set_token_##bits(struct btrfs_map_token *token, \
const void *ptr, unsigned long off, \