Diffstat (limited to 'fs/btrfs/ordered-data.c')
 fs/btrfs/ordered-data.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 52 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 52889da69113..1744ba8b2754 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -13,6 +13,7 @@
 #include "extent_io.h"
 #include "disk-io.h"
 #include "compression.h"
+#include "delalloc-space.h"
 
 static struct kmem_cache *btrfs_ordered_extent_cache;
@@ -924,14 +925,16 @@ out:
  * be reclaimed before their checksum is actually put into the btree
  */
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
-                           u32 *sum, int len)
+                           u8 *sum, int len)
 {
+        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
         struct btrfs_ordered_sum *ordered_sum;
         struct btrfs_ordered_extent *ordered;
         struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
         unsigned long num_sectors;
         unsigned long i;
         u32 sectorsize = btrfs_inode_sectorsize(inode);
+        const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
         int index = 0;
 
         ordered = btrfs_lookup_ordered_extent(inode, offset);
@@ -947,10 +950,10 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                         num_sectors = ordered_sum->len >>
                                       inode->i_sb->s_blocksize_bits;
                         num_sectors = min_t(int, len - index, num_sectors - i);
-                        memcpy(sum + index, ordered_sum->sums + i,
-                               num_sectors);
-                        index += (int)num_sectors;
+                        memcpy(sum + index, ordered_sum->sums + i * csum_size,
+                               num_sectors * csum_size);
+                        index += (int)num_sectors * csum_size;
                         if (index == len)
                                 goto out;
                         disk_bytenr += num_sectors * sectorsize;
                 }
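
Note: the hunk above stops assuming that each checksum is a u32. With
ordered_sum->sums treated as a byte array, the source offset, the copy
length, and the output cursor must all be scaled by csum_size. A minimal
userspace sketch of that indexing follows; it is illustrative only
(copy_csums and its parameters are not kernel code), assuming one
csum_size-byte checksum per sector:

#include <stdint.h>
#include <string.h>

/*
 * Illustrative sketch, not kernel code: copy num_sectors checksums,
 * each csum_size bytes wide, starting at sector index i in sums, and
 * return the caller's advanced output cursor.
 */
static int copy_csums(uint8_t *sum, int index, const uint8_t *sums,
                      unsigned long i, unsigned long num_sectors,
                      uint16_t csum_size)
{
        memcpy(sum + index, sums + i * csum_size, num_sectors * csum_size);
        return index + (int)(num_sectors * csum_size);
}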
@@ -962,6 +965,51 @@ out:
         return index;
 }
 
+/*
+ * btrfs_lock_and_flush_ordered_range - lock the passed range and ensure all
+ * pending ordered extents in it are run to completion
+ *
+ * @tree:         IO tree used for locking out other users of the range
+ * @inode:        Inode whose ordered tree is to be searched
+ * @start:        Beginning of range to flush
+ * @end:          Last byte of range to lock
+ * @cached_state: If passed, will return the extent state responsible for the
+ * locked range. It's the caller's responsibility to free the cached state.
+ *
+ * This function always returns with the given range locked, ensuring that
+ * after it is called no ordered extent can be pending in it.
+ */
+void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
+                                        struct btrfs_inode *inode, u64 start,
+                                        u64 end,
+                                        struct extent_state **cached_state)
+{
+        struct btrfs_ordered_extent *ordered;
+        struct extent_state *cachedp = NULL;
+
+        if (cached_state)
+                cachedp = *cached_state;
+
+        while (1) {
+                lock_extent_bits(tree, start, end, &cachedp);
+                ordered = btrfs_lookup_ordered_range(inode, start,
+                                                     end - start + 1);
+                if (!ordered) {
+                        /*
+                         * If no external cached_state has been passed then
+                         * decrement the extra ref taken for cachedp since we
+                         * aren't exposing it outside of this function
+                         */
+                        if (!cached_state)
+                                refcount_dec(&cachedp->refs);
+                        break;
+                }
+                unlock_extent_cached(tree, start, end, &cachedp);
+                btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
+                btrfs_put_ordered_extent(ordered);
+        }
+}
+
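
Note: for context, here is a hedged sketch of how a caller might use the
new helper; example_read_range and its body are illustrative, not part of
this patch:

/*
 * Illustrative caller, not part of this patch: acquire the range lock
 * with every pending ordered extent in it completed, do work on the
 * now-stable range, then drop the lock.
 */
static void example_read_range(struct btrfs_inode *inode, u64 start, u64 end)
{
        struct extent_state *cached_state = NULL;

        /* Returns with [start, end] locked and no ordered extent pending. */
        btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, start, end,
                                           &cached_state);

        /* ... read or modify the range ... */

        unlock_extent_cached(&inode->io_tree, start, end, &cached_state);
}

Passing a caller-owned cached_state avoids a second tree search on the
final unlock; if NULL is passed, the helper drops its internal reference
before returning, as the loop above shows.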
 int __init ordered_data_init(void)
 {
         btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",