Diffstat (limited to 'fs/xfs/xfs_iomap.c')
-rw-r--r--   fs/xfs/xfs_iomap.c   186
1 file changed, 110 insertions(+), 76 deletions(-)
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 046469fcc1b8..c6ce6f9335b6 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -224,7 +224,7 @@ xfs_iomap_write_direct(
* necessary and move on to transaction setup.
*/
xfs_iunlock(ip, lockmode);
- error = xfs_qm_dqattach(ip, 0);
+ error = xfs_qm_dqattach(ip);
if (error)
return error;
@@ -576,7 +576,7 @@ xfs_file_iomap_begin_delay(
goto done;
}
- error = xfs_qm_dqattach_locked(ip, 0);
+ error = xfs_qm_dqattach_locked(ip, false);
if (error)
goto out_unlock;
@@ -692,7 +692,7 @@ xfs_iomap_write_allocate(
/*
* Make sure that the dquots are there.
*/
- error = xfs_qm_dqattach(ip, 0);
+ error = xfs_qm_dqattach(ip);
if (error)
return error;
@@ -946,8 +946,11 @@ error_on_bmapi_transaction:
return error;
}
-static inline bool imap_needs_alloc(struct inode *inode,
- struct xfs_bmbt_irec *imap, int nimaps)
+static inline bool
+imap_needs_alloc(
+ struct inode *inode,
+ struct xfs_bmbt_irec *imap,
+ int nimaps)
{
return !nimaps ||
imap->br_startblock == HOLESTARTBLOCK ||
@@ -955,31 +958,58 @@ static inline bool imap_needs_alloc(struct inode *inode,
(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}
-static inline bool needs_cow_for_zeroing(struct xfs_bmbt_irec *imap, int nimaps)
+static inline bool
+needs_cow_for_zeroing(
+ struct xfs_bmbt_irec *imap,
+ int nimaps)
{
return nimaps &&
imap->br_startblock != HOLESTARTBLOCK &&
imap->br_state != XFS_EXT_UNWRITTEN;
}
-static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
+static int
+xfs_ilock_for_iomap(
+ struct xfs_inode *ip,
+ unsigned flags,
+ unsigned *lockmode)
{
+ unsigned mode = XFS_ILOCK_SHARED;
+
/*
* COW writes may allocate delalloc space or convert unwritten COW
* extents, so we need to make sure to take the lock exclusively here.
*/
- if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
- return true;
+ if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) {
+ /*
+ * FIXME: It could still overwrite on unshared extents and not
+ * need allocation.
+ */
+ if (flags & IOMAP_NOWAIT)
+ return -EAGAIN;
+ mode = XFS_ILOCK_EXCL;
+ }
/*
- * Extents not yet cached requires exclusive access, don't block.
- * This is an opencoded xfs_ilock_data_map_shared() to cater for the
+ * Extents not yet cached requires exclusive access, don't block. This
+ * is an opencoded xfs_ilock_data_map_shared() call but with
* non-blocking behaviour.
*/
- if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
- !(ip->i_df.if_flags & XFS_IFEXTENTS))
- return true;
- return false;
+ if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
+ if (flags & IOMAP_NOWAIT)
+ return -EAGAIN;
+ mode = XFS_ILOCK_EXCL;
+ }
+
+ if (flags & IOMAP_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, mode))
+ return -EAGAIN;
+ } else {
+ xfs_ilock(ip, mode);
+ }
+
+ *lockmode = mode;
+ return 0;
}
static int
@@ -1007,19 +1037,15 @@ xfs_file_iomap_begin(
return xfs_file_iomap_begin_delay(inode, offset, length, iomap);
}
- if (need_excl_ilock(ip, flags))
- lockmode = XFS_ILOCK_EXCL;
- else
- lockmode = XFS_ILOCK_SHARED;
-
- if (flags & IOMAP_NOWAIT) {
- if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
- return -EAGAIN;
- if (!xfs_ilock_nowait(ip, lockmode))
- return -EAGAIN;
- } else {
- xfs_ilock(ip, lockmode);
- }
+ /*
+ * Lock the inode in the manner required for the specified operation and
+ * check for as many conditions that would result in blocking as
+ * possible. This removes most of the non-blocking checks from the
+ * mapping code below.
+ */
+ error = xfs_ilock_for_iomap(ip, flags, &lockmode);
+ if (error)
+ return error;
ASSERT(offset <= mp->m_super->s_maxbytes);
if (offset > mp->m_super->s_maxbytes - length)
@@ -1040,19 +1066,21 @@ xfs_file_iomap_begin(
goto out_unlock;
}
- if (xfs_is_reflink_inode(ip) &&
- ((flags & IOMAP_WRITE) ||
- ((flags & IOMAP_ZERO) && needs_cow_for_zeroing(&imap, nimaps)))) {
+ /* Non-modifying mapping requested, so we are done */
+ if (!(flags & (IOMAP_WRITE | IOMAP_ZERO)))
+ goto out_found;
+
+ /*
+ * Break shared extents if necessary. Checks for non-blocking IO have
+ * been done up front, so we don't need to do them here.
+ */
+ if (xfs_is_reflink_inode(ip)) {
+ /* if zeroing doesn't need COW allocation, then we are done. */
+ if ((flags & IOMAP_ZERO) &&
+ !needs_cow_for_zeroing(&imap, nimaps))
+ goto out_found;
+
if (flags & IOMAP_DIRECT) {
- /*
- * A reflinked inode will result in CoW alloc.
- * FIXME: It could still overwrite on unshared extents
- * and not need allocation.
- */
- if (flags & IOMAP_NOWAIT) {
- error = -EAGAIN;
- goto out_unlock;
- }
/* may drop and re-acquire the ilock */
error = xfs_reflink_allocate_cow(ip, &imap, &shared,
&lockmode);
@@ -1068,46 +1096,45 @@ xfs_file_iomap_begin(
length = XFS_FSB_TO_B(mp, end_fsb) - offset;
}
- if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
- /*
- * If nowait is set bail since we are going to make
- * allocations.
- */
- if (flags & IOMAP_NOWAIT) {
- error = -EAGAIN;
- goto out_unlock;
- }
- /*
- * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
- * pages to keep the chunks of work done where somewhat symmetric
- * with the work writeback does. This is a completely arbitrary
- * number pulled out of thin air as a best guess for initial
- * testing.
- *
- * Note that the values needs to be less than 32-bits wide until
- * the lower level functions are updated.
- */
- length = min_t(loff_t, length, 1024 * PAGE_SIZE);
- /*
- * xfs_iomap_write_direct() expects the shared lock. It
- * is unlocked on return.
- */
- if (lockmode == XFS_ILOCK_EXCL)
- xfs_ilock_demote(ip, lockmode);
- error = xfs_iomap_write_direct(ip, offset, length, &imap,
- nimaps);
- if (error)
- return error;
+ /* Don't need to allocate over holes when doing zeroing operations. */
+ if (flags & IOMAP_ZERO)
+ goto out_found;
- iomap->flags = IOMAP_F_NEW;
- trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
- } else {
- ASSERT(nimaps);
+ if (!imap_needs_alloc(inode, &imap, nimaps))
+ goto out_found;
- xfs_iunlock(ip, lockmode);
- trace_xfs_iomap_found(ip, offset, length, 0, &imap);
+ /* If nowait is set bail since we are going to make allocations. */
+ if (flags & IOMAP_NOWAIT) {
+ error = -EAGAIN;
+ goto out_unlock;
}
+ /*
+ * We cap the maximum length we map to a sane size to keep the chunks
+ * of work done where somewhat symmetric with the work writeback does.
+ * This is a completely arbitrary number pulled out of thin air as a
+ * best guess for initial testing.
+ *
+ * Note that the values needs to be less than 32-bits wide until the
+ * lower level functions are updated.
+ */
+ length = min_t(loff_t, length, 1024 * PAGE_SIZE);
+
+ /*
+ * xfs_iomap_write_direct() expects the shared lock. It is unlocked on
+ * return.
+ */
+ if (lockmode == XFS_ILOCK_EXCL)
+ xfs_ilock_demote(ip, lockmode);
+ error = xfs_iomap_write_direct(ip, offset, length, &imap,
+ nimaps);
+ if (error)
+ return error;
+
+ iomap->flags = IOMAP_F_NEW;
+ trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
+
+out_finish:
if (xfs_ipincount(ip) && (ip->i_itemp->ili_fsync_fields
& ~XFS_ILOG_TIMESTAMP))
iomap->flags |= IOMAP_F_DIRTY;
@@ -1117,6 +1144,13 @@ xfs_file_iomap_begin(
if (shared)
iomap->flags |= IOMAP_F_SHARED;
return 0;
+
+out_found:
+ ASSERT(nimaps);
+ xfs_iunlock(ip, lockmode);
+ trace_xfs_iomap_found(ip, offset, length, 0, &imap);
+ goto out_finish;
+
out_unlock:
xfs_iunlock(ip, lockmode);
return error;
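
For readers piecing together the hunks above, the following is a hedged, self-contained C sketch of the locking pattern this patch introduces: one helper picks between a shared and an exclusive inode lock, bails out early with -EAGAIN whenever IOMAP_NOWAIT is set and exclusive work (COW or reading in the extent list) would be needed, and only then takes the lock. The struct, flag values and lock stubs below are simplified stand-ins for illustration, not the actual XFS implementation; only the control flow mirrors the new xfs_ilock_for_iomap() helper.

/*
 * Simplified model of the xfs_ilock_for_iomap() pattern from the patch
 * above.  Types, flags and lock stubs are illustrative stand-ins, not
 * kernel code; only the decision flow mirrors the new helper.
 */
#include <stdbool.h>
#include <stdio.h>

#define IOMAP_WRITE   (1 << 0)
#define IOMAP_ZERO    (1 << 1)
#define IOMAP_NOWAIT  (1 << 2)
#define EAGAIN        11

enum lock_mode { LOCK_SHARED, LOCK_EXCL };

struct demo_inode {
	bool reflink;          /* inode shares blocks (reflinked) */
	bool extents_cached;   /* in-core extent list already read in */
};

/* Stand-ins for xfs_ilock()/xfs_ilock_nowait(); always succeed here. */
static bool demo_ilock_nowait(struct demo_inode *ip, enum lock_mode mode)
{
	(void)ip; (void)mode;
	return true;
}

static void demo_ilock(struct demo_inode *ip, enum lock_mode mode)
{
	(void)ip; (void)mode;
}

/*
 * Decide the lock mode first, fail fast for IOMAP_NOWAIT cases that would
 * need exclusive access, then take the lock in the chosen mode.
 */
static int demo_ilock_for_iomap(struct demo_inode *ip, unsigned flags,
				enum lock_mode *lockmode)
{
	enum lock_mode mode = LOCK_SHARED;

	/* COW writes may allocate, so they need the exclusive lock. */
	if (ip->reflink && (flags & (IOMAP_WRITE | IOMAP_ZERO))) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = LOCK_EXCL;
	}

	/* Reading in the extent list also requires exclusive access. */
	if (!ip->extents_cached) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = LOCK_EXCL;
	}

	if (flags & IOMAP_NOWAIT) {
		if (!demo_ilock_nowait(ip, mode))
			return -EAGAIN;
	} else {
		demo_ilock(ip, mode);
	}

	*lockmode = mode;
	return 0;
}

int main(void)
{
	struct demo_inode ip = { .reflink = true, .extents_cached = true };
	enum lock_mode mode;
	int ret;

	/* A nowait write to a reflinked inode fails fast with -EAGAIN. */
	ret = demo_ilock_for_iomap(&ip, IOMAP_WRITE | IOMAP_NOWAIT, &mode);
	printf("nowait write: %d\n", ret);

	/* A blocking write takes the exclusive lock instead. */
	ret = demo_ilock_for_iomap(&ip, IOMAP_WRITE, &mode);
	printf("blocking write: %d (mode %d)\n", ret, mode);
	return 0;
}

This mirrors the effect of the patch on xfs_file_iomap_begin(): all the IOMAP_NOWAIT bail-outs tied to locking are concentrated in the helper, so the mapping code below it only needs the remaining -EAGAIN check before calling into allocation.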