author	Filipe Manana <fdmanana@suse.com>	2014-10-13 12:28:38 +0100
committer	Chris Mason <clm@fb.com>	2014-11-20 17:14:29 -0800
commit	e38e2ed701ff5f3d889c8dda5fe863e165e60d61 (patch)
tree	315b8703ca6c3317996de05a55ceb252257cbcff /fs/btrfs/extent_io.c
parent	Btrfs: deal with convert_extent_bit errors to avoid fs corruption (diff)
Btrfs: make find_first_extent_bit be able to cache any state
Right now the only caller of find_first_extent_bit() that is interested in caching extent states (the transaction or log commit code) never gets an extent state cached. This is because find_first_extent_bit() only caches states that have at least one of the flags EXTENT_IOBITS or EXTENT_BOUNDARY, and the transaction/log commit caller always passes a tree that never has extent states with any of those flags (its states can only have one of the following flags: EXTENT_DIRTY, EXTENT_NEW or EXTENT_NEED_WAIT).

This change, together with the following one in the patch series (titled "Btrfs: avoid returning -ENOMEM in convert_extent_bit() too early"), will significantly reduce the chances of calls to convert_extent_bit() failing with -ENOMEM when called from the transaction/log commit code.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 420fe26d32d5..0d931b143c00 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -796,17 +796,25 @@ static void set_state_bits(struct extent_io_tree *tree,
 	state->state |= bits_to_set;
 }
 
-static void cache_state(struct extent_state *state,
-			struct extent_state **cached_ptr)
+static void cache_state_if_flags(struct extent_state *state,
+				 struct extent_state **cached_ptr,
+				 const u64 flags)
 {
 	if (cached_ptr && !(*cached_ptr)) {
-		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
+		if (!flags || (state->state & flags)) {
 			*cached_ptr = state;
 			atomic_inc(&state->refs);
 		}
 	}
 }
 
+static void cache_state(struct extent_state *state,
+			struct extent_state **cached_ptr)
+{
+	return cache_state_if_flags(state, cached_ptr,
+				    EXTENT_IOBITS | EXTENT_BOUNDARY);
+}
+
 /*
  * set some bits on a range in the tree. This may require allocations or
  * sleeping, so the gfp mask is used to indicate what is allowed.
@@ -1482,7 +1490,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
 	state = find_first_extent_bit_state(tree, start, bits);
 got_it:
 	if (state) {
-		cache_state(state, cached_state);
+		cache_state_if_flags(state, cached_state, 0);
 		*start_ret = state->start;
 		*end_ret = state->end;
 		ret = 0;
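
For illustration only (not part of the patch), the following standalone user-space sketch mimics the caching rule this change introduces: cache_state_if_flags() caches a state whenever the caller passes flags == 0, while the old cache_state() behaviour only cached states carrying EXTENT_IOBITS or EXTENT_BOUNDARY. The struct layout, bit values and plain integer refcount below are simplified assumptions for the sketch, not the kernel's definitions.

/*
 * Standalone, simplified sketch (not kernel code) of the new caching rule.
 * Bit values and the refs field are illustrative only; the kernel uses
 * atomic_inc(&state->refs) and its own flag definitions.
 */
#include <stdio.h>

#define EXTENT_DIRTY	(1UL << 0)
#define EXTENT_BOUNDARY	(1UL << 1)
#define EXTENT_IOBITS	(1UL << 2)

struct extent_state {
	unsigned long state;
	int refs;
};

static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned long flags)
{
	/* Only cache if the caller asked for caching and nothing is cached yet. */
	if (cached_ptr && !(*cached_ptr)) {
		/* flags == 0 means "cache any state". */
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			state->refs++;
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	/* Old behaviour: only I/O or boundary states were cached. */
	cache_state_if_flags(state, cached_ptr, EXTENT_IOBITS | EXTENT_BOUNDARY);
}

int main(void)
{
	struct extent_state dirty = { .state = EXTENT_DIRTY, .refs = 1 };
	struct extent_state *cached = NULL;

	cache_state(&dirty, &cached);			/* not cached: no IOBITS/BOUNDARY bit */
	printf("old rule cached: %s\n", cached ? "yes" : "no");

	cache_state_if_flags(&dirty, &cached, 0);	/* flags == 0: cache anything */
	printf("new rule cached: %s\n", cached ? "yes" : "no");
	return 0;
}

Running the sketch prints "old rule cached: no" followed by "new rule cached: yes", mirroring why the transaction/log commit caller, whose tree only carries bits such as EXTENT_DIRTY, previously never received a cached state from find_first_extent_bit().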