Diffstat
-rw-r--r--  fs/xfs/xfs_trans.c  195
1 file changed, 195 insertions, 0 deletions
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index e72730f85af1..44f72c09c203 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -20,6 +20,10 @@
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
+#include "xfs_inode.h"
+#include "xfs_dquot_item.h"
+#include "xfs_dquot.h"
+#include "xfs_icache.h"
kmem_zone_t *xfs_trans_zone;
@@ -285,6 +289,17 @@ xfs_trans_alloc(
tp->t_firstblock = NULLFSBLOCK;
error = xfs_trans_reserve(tp, resp, blocks, rtextents);
+ if (error == -ENOSPC) {
+ /*
+ * We weren't able to reserve enough space for the transaction.
+ * Flush the other speculative space allocations to free space.
+ * Do not perform a synchronous scan because callers can hold
+ * other locks.
+ */
+ error = xfs_blockgc_free_space(mp, NULL);
+ if (!error)
+ error = xfs_trans_reserve(tp, resp, blocks, rtextents);
+ }
if (error) {
xfs_trans_cancel(tp);
return error;
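
For context, here is a minimal hypothetical caller sketch (not part of the patch; the function name is invented and the usual XFS headers such as "xfs.h", "xfs_mount.h", and "xfs_trans.h" are assumed). It only illustrates that, with the hunk above, a caller whose block reservation hits -ENOSPC no longer needs to flush speculative preallocations itself; xfs_trans_alloc retries once after an asynchronous blockgc scan and the caller simply sees the final result:

/* Hypothetical example, not part of the patch. */
static int
xfs_example_reserve_blocks(
	struct xfs_mount	*mp,
	unsigned int		blocks)
{
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Reserve log space plus @blocks data blocks using the stock
	 * write reservation; the -ENOSPC retry above is internal.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, blocks, 0, 0, &tp);
	if (error)
		return error;

	/* ... join and log metadata changes here ... */

	return xfs_trans_commit(tp);
}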
@@ -1024,3 +1039,183 @@ xfs_trans_roll(
tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
+
+/*
+ * Allocate a transaction, lock and join the inode to it, and reserve quota.
+ *
+ * The caller must ensure that the on-disk dquots attached to this inode have
+ * already been allocated and initialized. The caller is responsible for
+ * releasing ILOCK_EXCL if a new transaction is returned.
+ */
+int
+xfs_trans_alloc_inode(
+ struct xfs_inode *ip,
+ struct xfs_trans_res *resv,
+ unsigned int dblocks,
+ unsigned int rblocks,
+ bool force,
+ struct xfs_trans **tpp)
+{
+ struct xfs_trans *tp;
+ struct xfs_mount *mp = ip->i_mount;
+ bool retried = false;
+ int error;
+
+retry:
+ error = xfs_trans_alloc(mp, resv, dblocks,
+ rblocks / mp->m_sb.sb_rextsize,
+ force ? XFS_TRANS_RESERVE : 0, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
+ error = xfs_qm_dqattach_locked(ip, false);
+ if (error) {
+ /* Caller should have allocated the dquots! */
+ ASSERT(error != -ENOENT);
+ goto out_cancel;
+ }
+
+ error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
+ if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
+ xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_blockgc_free_quota(ip, 0);
+ retried = true;
+ goto retry;
+ }
+ if (error)
+ goto out_cancel;
+
+ *tpp = tp;
+ return 0;
+
+out_cancel:
+ xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+}
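
A hypothetical usage sketch of the new helper follows (not from the patch; the xfs_example_* name is invented and the stock tr_write reservation is assumed). It shows the documented contract: the helper hands back the transaction with the inode joined and ILOCK_EXCL held, so the caller drops the lock itself after committing or cancelling:

/* Hypothetical example, not part of the patch. */
static int
xfs_example_modify_inode(
	struct xfs_inode	*ip,
	unsigned int		dblocks)
{
	struct xfs_trans	*tp;
	int			error;

	/* Allocate, lock, join, and reserve quota in one call. */
	error = xfs_trans_alloc_inode(ip, &M_RES(ip->i_mount)->tr_write,
			dblocks, 0, false, &tp);
	if (error)
		return error;

	/* ... modify the inode and xfs_trans_log_inode() it here ... */

	error = xfs_trans_commit(tp);

	/* The helper left ILOCK_EXCL held; the caller releases it. */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}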
+
+/*
+ * Allocate a transaction in preparation for inode creation by reserving quota
+ * against the given dquots. Callers are not required to hold any inode locks.
+ */
+int
+xfs_trans_alloc_icreate(
+ struct xfs_mount *mp,
+ struct xfs_trans_res *resv,
+ struct xfs_dquot *udqp,
+ struct xfs_dquot *gdqp,
+ struct xfs_dquot *pdqp,
+ unsigned int dblocks,
+ struct xfs_trans **tpp)
+{
+ struct xfs_trans *tp;
+ bool retried = false;
+ int error;
+
+retry:
+ error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
+ if (error)
+ return error;
+
+ error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
+ if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
+ xfs_trans_cancel(tp);
+ xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
+ retried = true;
+ goto retry;
+ }
+ if (error) {
+ xfs_trans_cancel(tp);
+ return error;
+ }
+
+ *tpp = tp;
+ return 0;
+}
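
Again a hypothetical sketch (not part of the patch; the name is invented, the dquots are assumed to come from an earlier xfs_qm_vop_dqalloc() call, and the stock tr_create reservation is assumed), showing that the helper rolls block and quota reservation for inode creation into one call, with the EDQUOT/ENOSPC retry handled internally:

/* Hypothetical example, not part of the patch. */
static int
xfs_example_prepare_create(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		resblks)
{
	struct xfs_trans	*tp;
	int			error;

	/* Reserve log space, @resblks blocks, and quota for the new inode. */
	error = xfs_trans_alloc_icreate(mp, &M_RES(mp)->tr_create, udqp,
			gdqp, pdqp, resblks, &tp);
	if (error)
		return error;

	/* ... allocate the inode and log the directory entry here ... */

	return xfs_trans_commit(tp);
}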
+
+/*
+ * Allocate a transaction, lock and join the inode to it, and reserve quota
+ * in preparation for inode attribute changes that include uid, gid, or prid
+ * changes.
+ *
+ * The caller must ensure that the on-disk dquots attached to this inode have
+ * already been allocated and initialized. The ILOCK will be dropped when the
+ * transaction is committed or cancelled.
+ */
+int
+xfs_trans_alloc_ichange(
+ struct xfs_inode *ip,
+ struct xfs_dquot *new_udqp,
+ struct xfs_dquot *new_gdqp,
+ struct xfs_dquot *new_pdqp,
+ bool force,
+ struct xfs_trans **tpp)
+{
+ struct xfs_trans *tp;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dquot *udqp;
+ struct xfs_dquot *gdqp;
+ struct xfs_dquot *pdqp;
+ bool retried = false;
+ int error;
+
+retry:
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ error = xfs_qm_dqattach_locked(ip, false);
+ if (error) {
+ /* Caller should have allocated the dquots! */
+ ASSERT(error != -ENOENT);
+ goto out_cancel;
+ }
+
+ /*
+ * For each quota type, skip quota reservations if the inode's dquots
+ * now match the ones that came from the caller, or the caller didn't
+ * pass one in. The inode's dquots can change if we drop the ILOCK to
+ * perform a blockgc scan, so we must preserve the caller's arguments.
+ */
+ udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
+ gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
+ pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
+ if (udqp || gdqp || pdqp) {
+ unsigned int qflags = XFS_QMOPT_RES_REGBLKS;
+
+ if (force)
+ qflags |= XFS_QMOPT_FORCE_RES;
+
+ /*
+ * Reserve enough quota to handle blocks on disk and reserved
+ * for a delayed allocation. We'll actually transfer the
+ * delalloc reservation between dquots at chown time, even
+ * though that part is only semi-transactional.
+ */
+ error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
+ pdqp, ip->i_d.di_nblocks + ip->i_delayed_blks,
+ 1, qflags);
+ if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
+ xfs_trans_cancel(tp);
+ xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
+ retried = true;
+ goto retry;
+ }
+ if (error)
+ goto out_cancel;
+ }
+
+ *tpp = tp;
+ return 0;
+
+out_cancel:
+ xfs_trans_cancel(tp);
+ return error;
+}
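
Finally, a hypothetical chown-style sketch (not from the patch; the name is invented and the new dquots are assumed to have been looked up beforehand). Because the inode is joined with XFS_ILOCK_EXCL above, committing or cancelling the transaction also releases the ILOCK, so the caller does not unlock explicitly:

/* Hypothetical example, not part of the patch. */
static int
xfs_example_change_owner(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	bool			force)
{
	struct xfs_trans	*tp;
	int			error;

	/* Lock and join the inode; reserve against dquots that differ. */
	error = xfs_trans_alloc_ichange(ip, new_udqp, new_gdqp, NULL,
			force, &tp);
	if (error)
		return error;

	/* ... update ownership and transfer block counts here ... */

	/* Committing (or cancelling) also drops ILOCK_EXCL. */
	return xfs_trans_commit(tp);
}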