Diffstat (limited to 'fs/xfs/xfs_bmap_util.c')
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 255
1 file changed, 41 insertions(+), 214 deletions(-)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 4f443703065e..2efd78a9719e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -53,15 +53,16 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
*/
int
xfs_zero_extent(
- struct xfs_inode *ip,
- xfs_fsblock_t start_fsb,
- xfs_off_t count_fsb)
+ struct xfs_inode *ip,
+ xfs_fsblock_t start_fsb,
+ xfs_off_t count_fsb)
{
- struct xfs_mount *mp = ip->i_mount;
- xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
- sector_t block = XFS_BB_TO_FSBT(mp, sector);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+ xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
+ sector_t block = XFS_BB_TO_FSBT(mp, sector);
- return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
+ return blkdev_issue_zeroout(target->bt_bdev,
block << (mp->m_super->s_blocksize_bits - 9),
count_fsb << (mp->m_super->s_blocksize_bits - 9),
GFP_NOFS, 0);
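
For reference only (not part of the patch): a minimal standalone sketch of the unit conversion in the call above, where a filesystem-block range is shifted into 512-byte sector units before being handed to blkdev_issue_zeroout(). The block size and offsets below are illustrative.

/*
 * Illustrative sketch: 1 fs block = 2^(blocksize_bits - 9) sectors of
 * 512 bytes, so both the start and the count are shifted the same way.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int blocksize_bits = 12;	/* hypothetical 4096-byte blocks */
	uint64_t start_block = 100;		/* hypothetical first block to zero */
	uint64_t count_blocks = 8;		/* hypothetical number of blocks */

	uint64_t start_sector = start_block << (blocksize_bits - 9);
	uint64_t nr_sectors = count_blocks << (blocksize_bits - 9);

	printf("zero sectors %llu..%llu\n",
	       (unsigned long long)start_sector,
	       (unsigned long long)(start_sector + nr_sectors - 1));
	return 0;
}
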
@@ -164,13 +165,6 @@ xfs_bmap_rtalloc(
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
-
- /* Zero the extent if we were asked to do so */
- if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
- error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
- if (error)
- return error;
- }
} else {
ap->length = 0;
}
@@ -179,29 +173,6 @@ xfs_bmap_rtalloc(
#endif /* CONFIG_XFS_RT */
/*
- * Check if the endoff is outside the last extent. If so the caller will grow
- * the allocation to a stripe unit boundary. All offsets are considered outside
- * the end of file for an empty fork, so 1 is returned in *eof in that case.
- */
-int
-xfs_bmap_eof(
- struct xfs_inode *ip,
- xfs_fileoff_t endoff,
- int whichfork,
- int *eof)
-{
- struct xfs_bmbt_irec rec;
- int error;
-
- error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
- if (error || *eof)
- return error;
-
- *eof = endoff >= rec.br_startoff + rec.br_blockcount;
- return 0;
-}
-
-/*
* Extent tree block counting routines.
*/
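
For reference only (not part of the patch, and not the XFS API): a simplified sketch of the check the removed xfs_bmap_eof() performed. An offset counts as beyond EOF when the fork has no extents at all, or when it is at or past the end of the last extent. The types and values below are invented for illustration.

/* Illustrative stand-in for the removed check; not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct extent {
	uint64_t startoff;	/* first file offset (in blocks) covered */
	uint64_t blockcount;	/* number of blocks in the extent */
};

static bool offset_beyond_last_extent(const struct extent *last, uint64_t endoff)
{
	if (!last)		/* empty fork: every offset counts as EOF */
		return true;
	return endoff >= last->startoff + last->blockcount;
}

int main(void)
{
	struct extent last = { .startoff = 10, .blockcount = 5 };

	printf("%d\n", offset_beyond_last_extent(&last, 14));	/* 0: inside */
	printf("%d\n", offset_beyond_last_extent(&last, 15));	/* 1: past end */
	return 0;
}
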
@@ -229,106 +200,6 @@ xfs_bmap_count_leaves(
}
/*
- * Count leaf blocks given a range of extent records originally
- * in btree format.
- */
-STATIC void
-xfs_bmap_disk_count_leaves(
- struct xfs_mount *mp,
- struct xfs_btree_block *block,
- int numrecs,
- xfs_filblks_t *count)
-{
- int b;
- xfs_bmbt_rec_t *frp;
-
- for (b = 1; b <= numrecs; b++) {
- frp = XFS_BMBT_REC_ADDR(mp, block, b);
- *count += xfs_bmbt_disk_get_blockcount(frp);
- }
-}
-
-/*
- * Recursively walks each level of a btree
- * to count total fsblocks in use.
- */
-STATIC int
-xfs_bmap_count_tree(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- struct xfs_ifork *ifp,
- xfs_fsblock_t blockno,
- int levelin,
- xfs_extnum_t *nextents,
- xfs_filblks_t *count)
-{
- int error;
- struct xfs_buf *bp, *nbp;
- int level = levelin;
- __be64 *pp;
- xfs_fsblock_t bno = blockno;
- xfs_fsblock_t nextbno;
- struct xfs_btree_block *block, *nextblock;
- int numrecs;
-
- error = xfs_btree_read_bufl(mp, tp, bno, &bp, XFS_BMAP_BTREE_REF,
- &xfs_bmbt_buf_ops);
- if (error)
- return error;
- *count += 1;
- block = XFS_BUF_TO_BLOCK(bp);
-
- if (--level) {
- /* Not at node above leaves, count this level of nodes */
- nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
- while (nextbno != NULLFSBLOCK) {
- error = xfs_btree_read_bufl(mp, tp, nextbno, &nbp,
- XFS_BMAP_BTREE_REF,
- &xfs_bmbt_buf_ops);
- if (error)
- return error;
- *count += 1;
- nextblock = XFS_BUF_TO_BLOCK(nbp);
- nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
- xfs_trans_brelse(tp, nbp);
- }
-
- /* Dive to the next level */
- pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
- bno = be64_to_cpu(*pp);
- error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
- count);
- if (error) {
- xfs_trans_brelse(tp, bp);
- XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
- XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
- xfs_trans_brelse(tp, bp);
- } else {
- /* count all level 1 nodes and their leaves */
- for (;;) {
- nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
- numrecs = be16_to_cpu(block->bb_numrecs);
- (*nextents) += numrecs;
- xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
- xfs_trans_brelse(tp, bp);
- if (nextbno == NULLFSBLOCK)
- break;
- bno = nextbno;
- error = xfs_btree_read_bufl(mp, tp, bno, &bp,
- XFS_BMAP_BTREE_REF,
- &xfs_bmbt_buf_ops);
- if (error)
- return error;
- *count += 1;
- block = XFS_BUF_TO_BLOCK(bp);
- }
- }
- return 0;
-}
-
-/*
* Count fsblocks of the given fork. Delayed allocation extents are
* not counted towards the totals.
*/
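
For reference only (not part of the patch): a simplified in-memory analogue of the recursive counting that the removed xfs_bmap_count_tree()/xfs_bmap_disk_count_leaves() helpers performed against on-disk btree blocks. The node layout below is invented for illustration; the real code reads each block through the buffer cache and also follows right-sibling pointers at every level.

/* Illustrative in-memory btree block counter; not the on-disk bmap btree code. */
#include <stdint.h>
#include <stdio.h>

#define MAX_CHILDREN 4

struct node {
	int level;				/* 0 = leaf */
	int numrecs;
	struct node *child[MAX_CHILDREN];	/* used when level > 0 */
	uint64_t blockcount[MAX_CHILDREN];	/* used when level == 0 */
};

static void count_tree(const struct node *n, uint64_t *blocks,
		       uint64_t *extents, uint64_t *fsblocks)
{
	(*blocks)++;				/* this btree block itself */
	if (n->level == 0) {
		*extents += n->numrecs;
		for (int i = 0; i < n->numrecs; i++)
			*fsblocks += n->blockcount[i];
		return;
	}
	for (int i = 0; i < n->numrecs; i++)
		count_tree(n->child[i], blocks, extents, fsblocks);
}

int main(void)
{
	struct node leaf1 = { .level = 0, .numrecs = 2, .blockcount = { 8, 16 } };
	struct node leaf2 = { .level = 0, .numrecs = 1, .blockcount = { 4 } };
	struct node root  = { .level = 1, .numrecs = 2, .child = { &leaf1, &leaf2 } };
	uint64_t blocks = 0, extents = 0, fsblocks = 0;

	count_tree(&root, &blocks, &extents, &fsblocks);
	printf("btree blocks %llu, extents %llu, mapped blocks %llu\n",
	       (unsigned long long)blocks, (unsigned long long)extents,
	       (unsigned long long)fsblocks);
	return 0;
}
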
@@ -340,26 +211,19 @@ xfs_bmap_count_blocks(
xfs_extnum_t *nextents,
xfs_filblks_t *count)
{
- struct xfs_mount *mp; /* file system mount structure */
- __be64 *pp; /* pointer to block address */
- struct xfs_btree_block *block; /* current btree block */
- struct xfs_ifork *ifp; /* fork structure */
- xfs_fsblock_t bno; /* block # of "block" */
- int level; /* btree level, for checking */
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_btree_cur *cur;
+ xfs_extlen_t btblocks = 0;
int error;
- bno = NULLFSBLOCK;
- mp = ip->i_mount;
*nextents = 0;
*count = 0;
- ifp = XFS_IFORK_PTR(ip, whichfork);
+
if (!ifp)
return 0;
switch (XFS_IFORK_FORMAT(ip, whichfork)) {
- case XFS_DINODE_FMT_EXTENTS:
- *nextents = xfs_bmap_count_leaves(ifp, count);
- return 0;
case XFS_DINODE_FMT_BTREE:
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(tp, ip, whichfork);
@@ -367,26 +231,23 @@ xfs_bmap_count_blocks(
return error;
}
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+ error = xfs_btree_count_blocks(cur, &btblocks);
+ xfs_btree_del_cursor(cur, error);
+ if (error)
+ return error;
+
/*
- * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+ * xfs_btree_count_blocks includes the root block contained in
+ * the inode fork in @btblocks, so subtract one because we're
+ * only interested in allocated disk blocks.
*/
- block = ifp->if_broot;
- level = be16_to_cpu(block->bb_level);
- ASSERT(level > 0);
- pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
- bno = be64_to_cpu(*pp);
- ASSERT(bno != NULLFSBLOCK);
- ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
- ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
-
- error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
- nextents, count);
- if (error) {
- XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
- XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
- return 0;
+ *count += btblocks - 1;
+
+ /* fall through */
+ case XFS_DINODE_FMT_EXTENTS:
+ *nextents = xfs_bmap_count_leaves(ifp, count);
+ break;
}
return 0;
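
For reference only (not part of the patch): the arithmetic behind the new path. A generic btree walk reports the number of btree blocks including the root held inside the inode fork, so one block is subtracted before the leaf records' mapped blocks are added via the fall-through to the extents case. The values below are invented for illustration.

/* Illustrative arithmetic only; not the XFS API. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t btblocks = 5;		/* hypothetical walk result: incore root + 4 on-disk blocks */
	uint64_t mapped_blocks = 120;	/* hypothetical sum of extent lengths from the leaves */
	uint64_t count = 0;

	count += btblocks - 1;		/* drop the incore root: only on-disk btree blocks */
	count += mapped_blocks;		/* plus the blocks mapped by the extent records */

	printf("fork uses %llu fsblocks\n", (unsigned long long)count);
	return 0;
}
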
@@ -964,8 +825,8 @@ xfs_alloc_file_space(
xfs_trans_ijoin(tp, ip, 0);
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
- allocatesize_fsb, alloc_type, resblks,
- imapp, &nimaps);
+ allocatesize_fsb, alloc_type, 0, imapp,
+ &nimaps);
if (error)
goto error0;
@@ -1039,6 +900,7 @@ out_trans_cancel:
goto out_unlock;
}
+/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
struct xfs_inode *ip,
@@ -1050,9 +912,6 @@ xfs_flush_unmap_range(
xfs_off_t rounding, start, end;
int error;
- /* wait for the completion of any pending DIOs */
- inode_dio_wait(inode);
-
rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
start = round_down(offset, rounding);
end = round_up(offset + len, rounding) - 1;
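
For reference only (not part of the patch): a standalone sketch of the rounding above, which widens the flush range to whole units of the larger of the filesystem block size and the page size. The sizes and offsets below are illustrative, and the helpers assume power-of-two alignments as the kernel macros do.

/* Illustrative rounding helpers; not the kernel round_down()/round_up() macros. */
#include <stdint.h>
#include <stdio.h>

static uint64_t round_down_to(uint64_t x, uint64_t align)
{
	return x & ~(align - 1);		/* align must be a power of two */
}

static uint64_t round_up_to(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	uint64_t blocksize = 1024, pagesize = 4096;	/* hypothetical sizes */
	uint64_t offset = 5000, len = 3000;		/* hypothetical range */

	uint64_t rounding = blocksize > pagesize ? blocksize : pagesize;
	uint64_t start = round_down_to(offset, rounding);
	uint64_t end = round_up_to(offset + len, rounding) - 1;

	printf("flush %llu..%llu\n", (unsigned long long)start,
	       (unsigned long long)end);
	return 0;
}
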
@@ -1084,10 +943,6 @@ xfs_free_file_space(
if (len <= 0) /* if nothing being freed */
return 0;
- error = xfs_flush_unmap_range(ip, offset, len);
- if (error)
- return error;
-
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
@@ -1113,7 +968,8 @@ xfs_free_file_space(
return 0;
if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset;
- error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+ error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
+ &xfs_buffered_write_iomap_ops);
if (error)
return error;
@@ -1131,43 +987,6 @@ xfs_free_file_space(
return error;
}
-/*
- * Preallocate and zero a range of a file. This mechanism has the allocation
- * semantics of fallocate and in addition converts data in the range to zeroes.
- */
-int
-xfs_zero_file_space(
- struct xfs_inode *ip,
- xfs_off_t offset,
- xfs_off_t len)
-{
- struct xfs_mount *mp = ip->i_mount;
- uint blksize;
- int error;
-
- trace_xfs_zero_file_space(ip);
-
- blksize = 1 << mp->m_sb.sb_blocklog;
-
- /*
- * Punch a hole and prealloc the range. We use hole punch rather than
- * unwritten extent conversion for two reasons:
- *
- * 1.) Hole punch handles partial block zeroing for us.
- *
- * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
- * by virtue of the hole punch.
- */
- error = xfs_free_file_space(ip, offset, len);
- if (error || xfs_is_always_cow_inode(ip))
- return error;
-
- return xfs_alloc_file_space(ip, round_down(offset, blksize),
- round_up(offset + len, blksize) -
- round_down(offset, blksize),
- XFS_BMAPI_PREALLOC);
-}
-
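
For reference only (not part of the patch, and not the removed kernel helper): a userspace analogue of the punch-then-preallocate sequence that xfs_zero_file_space() performed internally, expressed with the fallocate(2) flags available to applications. The file path and offsets below are hypothetical.

/* Userspace analogue: punch a hole (range now reads as zeroes), then prealloc it. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example-file", O_RDWR);	/* hypothetical file */
	off_t offset = 4096, len = 8192;		/* hypothetical range */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Punch: deallocates the range and guarantees it reads back as zeroes. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len))
		perror("punch hole");
	/* Prealloc: reallocate the now-zeroed range without changing i_size. */
	else if (fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, len))
		perror("preallocate");

	close(fd);
	return 0;
}
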
static int
xfs_prepare_shift(
struct xfs_inode *ip,
@@ -1750,6 +1569,14 @@ xfs_swap_extents(
goto out_unlock;
}
+ error = xfs_qm_dqattach(ip);
+ if (error)
+ goto out_unlock;
+
+ error = xfs_qm_dqattach(tip);
+ if (error)
+ goto out_unlock;
+
error = xfs_swap_extent_flush(ip);
if (error)
goto out_unlock;