path: root/fs/btrfs/inode.c
author    Qu Wenruo <wqu@suse.com>          2021-09-15 15:17:18 +0800
committer David Sterba <dsterba@suse.com>   2021-10-26 19:08:02 +0200
commit    c3a3b19baceee1c50aab79a219550f11995f3560 (patch)
tree      aacf8efcc8b729623303c15603857de93fd51e30 /fs/btrfs/inode.c
parent    btrfs: remove btrfs_bio_alloc() helper (diff)
download  linux-dev-c3a3b19baceee1c50aab79a219550f11995f3560.tar.xz
          linux-dev-c3a3b19baceee1c50aab79a219550f11995f3560.zip
btrfs: rename struct btrfs_io_bio to btrfs_bio
Previously we had "struct btrfs_bio", which records IO context for mirrored IO and RAID56, and "struct btrfs_io_bio", which records extra btrfs-specific info for logical bytenr bios.

With "btrfs_bio" renamed to "btrfs_io_context", we are now safe to rename "btrfs_io_bio" to "btrfs_bio", which is the more suitable name.

The meaning of struct btrfs_bio changes with this commit. A name like btrfs_logical_bio was suggested, but it is a bit long and a shorter name is preferred. The changed meaning could be a concern for backports to older kernels, where it might cause confusion or bugs. Comparing the new and old structures, there is no overlap among the struct members, so a build would break in case of an incorrect backport. We have not had many backports touching the bio code anyway, so this is more a theoretical source of bugs and a matter of precaution, but the semantic change needs to be kept in mind.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
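For orientation, here is a minimal sketch of the renamed structure and its accessor as they stand after this patch. It is abridged to the members that the hunks below actually touch (csum, device, mirror_num, logical, iter and the embedded bio); the exact types, member ordering and the elided fields are assumptions, not the full header definition. The btrfs_bio() helper is the container_of()-based accessor that the call sites switch to in place of btrfs_io_bio():

	/* Abridged sketch, not the full definition from the btrfs headers. */
	struct btrfs_bio {
		unsigned int mirror_num;
		struct btrfs_device *device;	/* device that served the I/O, used for corruption stats */
		u8 *csum;			/* expected data checksums; NULL for compressed reads */
		struct bvec_iter iter;		/* saved iterator, used to re-walk the bio on completion */
		u64 logical;			/* starting offset, used by the direct I/O repair path */
		/* ... other members elided; the embedded bio must stay last ... */
		struct bio bio;
	};

	/* Replaces btrfs_io_bio(): map an embedded struct bio back to its owning btrfs_bio. */
	static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
	{
		return container_of(bio, struct btrfs_bio, bio);
	}

Because the bio is embedded as the last member, the accessor is plain pointer arithmetic, so the renamed call sites in the diff below cost nothing extra at runtime.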
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--  fs/btrfs/inode.c   47
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 027f2ebc8dc3..a643be30c18a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3217,7 +3217,7 @@ void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
*
* The length of such check is always one sector size.
*/
-static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
+static int check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
u32 bio_offset, struct page *page, u32 pgoff,
u64 start)
{
@@ -3233,7 +3233,7 @@ static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
ASSERT(pgoff + len <= PAGE_SIZE);
offset_sectors = bio_offset >> fs_info->sectorsize_bits;
- csum_expected = ((u8 *)io_bio->csum) + offset_sectors * csum_size;
+ csum_expected = ((u8 *)bbio->csum) + offset_sectors * csum_size;
kaddr = kmap_atomic(page);
shash->tfm = fs_info->csum_shash;
@@ -3247,9 +3247,9 @@ static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
return 0;
zeroit:
btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
- io_bio->mirror_num);
- if (io_bio->device)
- btrfs_dev_stat_inc_and_print(io_bio->device,
+ bbio->mirror_num);
+ if (bbio->device)
+ btrfs_dev_stat_inc_and_print(bbio->device,
BTRFS_DEV_STAT_CORRUPTION_ERRS);
memset(kaddr + pgoff, 1, len);
flush_dcache_page(page);
@@ -3269,8 +3269,9 @@ zeroit:
* Return a bitmap where bit set means a csum mismatch, and bit not set means
* csum match.
*/
-unsigned int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
- struct page *page, u64 start, u64 end)
+unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
+ u32 bio_offset, struct page *page,
+ u64 start, u64 end)
{
struct inode *inode = page->mapping->host;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -3288,14 +3289,14 @@ unsigned int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
* For subpage case, above PageChecked is not safe as it's not subpage
* compatible.
* But for now only cow fixup and compressed read utilize PageChecked
- * flag, while in this context we can easily use io_bio->csum to
+ * flag, while in this context we can easily use bbio->csum to
* determine if we really need to do csum verification.
*
- * So for now, just exit if io_bio->csum is NULL, as it means it's
+ * So for now, just exit if bbio->csum is NULL, as it means it's
* compressed read, and its compressed data csum has already been
* verified.
*/
- if (io_bio->csum == NULL)
+ if (bbio->csum == NULL)
return 0;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
@@ -3322,7 +3323,7 @@ unsigned int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
EXTENT_NODATASUM);
continue;
}
- ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off,
+ ret = check_data_csum(inode, bbio, bio_offset, page, pg_off,
page_offset(page) + pg_off);
if (ret < 0) {
const int nr_bit = (pg_off - offset_in_page(start)) >>
@@ -8082,7 +8083,7 @@ static blk_status_t submit_dio_repair_bio(struct inode *inode, struct bio *bio,
}
static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
- struct btrfs_io_bio *io_bio,
+ struct btrfs_bio *bbio,
const bool uptodate)
{
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
@@ -8092,11 +8093,11 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
struct bio_vec bvec;
struct bvec_iter iter;
- u64 start = io_bio->logical;
+ u64 start = bbio->logical;
u32 bio_offset = 0;
blk_status_t err = BLK_STS_OK;
- __bio_for_each_segment(bvec, &io_bio->bio, iter, io_bio->iter) {
+ __bio_for_each_segment(bvec, &bbio->bio, iter, bbio->iter) {
unsigned int i, nr_sectors, pgoff;
nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
@@ -8104,7 +8105,7 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
for (i = 0; i < nr_sectors; i++) {
ASSERT(pgoff < PAGE_SIZE);
if (uptodate &&
- (!csum || !check_data_csum(inode, io_bio,
+ (!csum || !check_data_csum(inode, bbio,
bio_offset, bvec.bv_page,
pgoff, start))) {
clean_io_failure(fs_info, failure_tree, io_tree,
@@ -8114,12 +8115,12 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
} else {
int ret;
- ASSERT((start - io_bio->logical) < UINT_MAX);
+ ASSERT((start - bbio->logical) < UINT_MAX);
ret = btrfs_repair_one_sector(inode,
- &io_bio->bio,
- start - io_bio->logical,
+ &bbio->bio,
+ start - bbio->logical,
bvec.bv_page, pgoff,
- start, io_bio->mirror_num,
+ start, bbio->mirror_num,
submit_dio_repair_bio);
if (ret)
err = errno_to_blk_status(ret);
@@ -8161,8 +8162,8 @@ static void btrfs_end_dio_bio(struct bio *bio)
bio->bi_iter.bi_size, err);
if (bio_op(bio) == REQ_OP_READ) {
- err = btrfs_check_read_dio_bio(dip->inode, btrfs_io_bio(bio),
- !err);
+ err = btrfs_check_read_dio_bio(dip->inode,
+ btrfs_bio(bio), !err);
}
if (err)
@@ -8213,7 +8214,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
csum_offset = file_offset - dip->logical_offset;
csum_offset >>= fs_info->sectorsize_bits;
csum_offset *= fs_info->csum_size;
- btrfs_io_bio(bio)->csum = dip->csums + csum_offset;
+ btrfs_bio(bio)->csum = dip->csums + csum_offset;
}
map:
ret = btrfs_map_bio(fs_info, bio, 0);
@@ -8329,7 +8330,7 @@ static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter,
bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
- btrfs_io_bio(bio)->logical = file_offset;
+ btrfs_bio(bio)->logical = file_offset;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
status = extract_ordered_extent(BTRFS_I(inode), bio,