Diffstat (limited to 'fs/xfs/libxfs/xfs_rmap_btree.c')
 fs/xfs/libxfs/xfs_rmap_btree.c | 315 ++++++++++++++++++++++++------------
 1 file changed, 210 insertions(+), 105 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index fc78efa52c94..7f83f62e51e0 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -9,18 +9,21 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
+#include "xfs_btree_staging.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
+static struct kmem_cache *xfs_rmapbt_cur_cache;
+
/*
* Reverse map btree.
*
@@ -51,65 +54,60 @@ xfs_rmapbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agbp, cur->bc_private.a.agno);
+ cur->bc_ag.agbp, cur->bc_ag.pag);
}
STATIC void
xfs_rmapbt_set_root(
- struct xfs_btree_cur *cur,
- union xfs_btree_ptr *ptr,
- int inc)
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_ptr *ptr,
+ int inc)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
- xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
int btnum = cur->bc_btnum;
- struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
ASSERT(ptr->s != 0);
agf->agf_roots[btnum] = ptr->s;
be32_add_cpu(&agf->agf_levels[btnum], inc);
- pag->pagf_levels[btnum] += inc;
- xfs_perag_put(pag);
+ cur->bc_ag.pag->pagf_levels[btnum] += inc;
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
STATIC int
xfs_rmapbt_alloc_block(
- struct xfs_btree_cur *cur,
- union xfs_btree_ptr *start,
- union xfs_btree_ptr *new,
- int *stat)
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_perag *pag = cur->bc_ag.pag;
int error;
xfs_agblock_t bno;
/* Allocate the new block from the freelist. If we can't, give up. */
- error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+ error = xfs_alloc_get_freelist(pag, cur->bc_tp, cur->bc_ag.agbp,
&bno, 1);
if (error)
return error;
- trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
- bno, 1);
+ trace_xfs_rmapbt_alloc_block(cur->bc_mp, pag->pag_agno, bno, 1);
if (bno == NULLAGBLOCK) {
*stat = 0;
return 0;
}
- xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
- false);
+ xfs_extent_busy_reuse(cur->bc_mp, pag, bno, 1, false);
- xfs_trans_agbtree_delta(cur->bc_tp, 1);
new->s = cpu_to_be32(bno);
be32_add_cpu(&agf->agf_rmap_blocks, 1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
- xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_private.a.agno);
+ xfs_ag_resv_rmapbt_alloc(cur->bc_mp, pag->pag_agno);
*stat = 1;
return 0;
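
The bc_private.a pair of (agbp, agno) gives way to bc_ag, which caches a counted reference to the struct xfs_perag itself. A hedged sketch of the resulting cursor state (the field layout here is an assumption; the real definition lives in xfs_btree.h):

	struct xfs_btree_cur_ag {
		struct xfs_perag	*pag;	/* counted reference to the AG */
		union {
			struct xfs_buf		*agbp;	/* live tree: AGF buffer */
			struct xbtree_afakeroot	*afake;	/* staging tree: fake root */
		};
	};

Because the cursor now pins the perag for its whole lifetime, helpers such as xfs_rmapbt_set_root() above can drop their per-call xfs_perag_get()/xfs_perag_put() round trips.
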
@@ -120,26 +118,25 @@ xfs_rmapbt_free_block(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_perag *pag = cur->bc_ag.pag;
xfs_agblock_t bno;
int error;
- bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
- trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+ bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
+ trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,
bno, 1);
be32_add_cpu(&agf->agf_rmap_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
- error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
+ error = xfs_alloc_put_freelist(pag, cur->bc_tp, agbp, NULL, bno, 1);
if (error)
return error;
- xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+ xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
- xfs_trans_agbtree_delta(cur->bc_tp, -1);
-
- xfs_ag_resv_rmapbt_free(cur->bc_mp, cur->bc_private.a.agno);
+ xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
return 0;
}
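
The dedicated xfs_ag_resv_rmapbt_free() wrapper disappears in favor of a direct xfs_ag_resv_free_extent() call. Reconstructed from context (not verbatim), the removed helper was just a perag lookup wrapped around that same call, which is redundant now that the cursor already holds the perag:

	void
	xfs_ag_resv_rmapbt_free(
		struct xfs_mount	*mp,
		xfs_agnumber_t		agno)
	{
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, agno);
		xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
		xfs_perag_put(pag);
	}
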
@@ -161,8 +158,8 @@ xfs_rmapbt_get_maxrecs(
STATIC void
xfs_rmapbt_init_key_from_rec(
- union xfs_btree_key *key,
- union xfs_btree_rec *rec)
+ union xfs_btree_key *key,
+ const union xfs_btree_rec *rec)
{
key->rmap.rm_startblock = rec->rmap.rm_startblock;
key->rmap.rm_owner = rec->rmap.rm_owner;
@@ -178,11 +175,11 @@ xfs_rmapbt_init_key_from_rec(
*/
STATIC void
xfs_rmapbt_init_high_key_from_rec(
- union xfs_btree_key *key,
- union xfs_btree_rec *rec)
+ union xfs_btree_key *key,
+ const union xfs_btree_rec *rec)
{
- uint64_t off;
- int adj;
+ uint64_t off;
+ int adj;
adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;
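
For intuition: a record with rm_startblock 10, rm_blockcount 5, rm_offset 3 covers blocks 10..14 and logical offsets 3..7, so its high key is (14, owner, 7). A hedged illustration of how adj feeds both fields (the real function skips the offset adjustment for non-inode owners and bmbt blocks):

	/* adj = 5 - 1 = 4 */
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);		/* 10 + 4 = 14 */
	key->rmap.rm_owner = rec->rmap.rm_owner;		/* unchanged */
	off = XFS_RMAP_OFF(be64_to_cpu(rec->rmap.rm_offset)) + adj;	/* 3 + 4 = 7 */
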
@@ -215,22 +212,22 @@ xfs_rmapbt_init_ptr_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+ struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_roots[cur->bc_btnum];
}
STATIC int64_t
xfs_rmapbt_key_diff(
- struct xfs_btree_cur *cur,
- union xfs_btree_key *key)
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key)
{
- struct xfs_rmap_irec *rec = &cur->bc_rec.r;
- struct xfs_rmap_key *kp = &key->rmap;
- __u64 x, y;
- int64_t d;
+ struct xfs_rmap_irec *rec = &cur->bc_rec.r;
+ const struct xfs_rmap_key *kp = &key->rmap;
+ __u64 x, y;
+ int64_t d;
d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
if (d)
@@ -254,14 +251,14 @@ xfs_rmapbt_key_diff(
STATIC int64_t
xfs_rmapbt_diff_two_keys(
- struct xfs_btree_cur *cur,
- union xfs_btree_key *k1,
- union xfs_btree_key *k2)
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *k1,
+ const union xfs_btree_key *k2)
{
- struct xfs_rmap_key *kp1 = &k1->rmap;
- struct xfs_rmap_key *kp2 = &k2->rmap;
- int64_t d;
- __u64 x, y;
+ const struct xfs_rmap_key *kp1 = &k1->rmap;
+ const struct xfs_rmap_key *kp2 = &k2->rmap;
+ int64_t d;
+ __u64 x, y;
d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
be32_to_cpu(kp2->rm_startblock);
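
Both comparison helpers apply the same precedence: startblock first, then owner, then offset. A hedged standalone sketch of that ordering (the real code masks the offset flag bits with XFS_RMAP_OFF() before comparing):

	static int64_t
	rmap_key_cmp_sketch(
		const struct xfs_rmap_key	*a,
		const struct xfs_rmap_key	*b)
	{
		int64_t				d;

		d = (int64_t)be32_to_cpu(a->rm_startblock) -
			     be32_to_cpu(b->rm_startblock);
		if (d)
			return d;
		if (be64_to_cpu(a->rm_owner) != be64_to_cpu(b->rm_owner))
			return be64_to_cpu(a->rm_owner) >
			       be64_to_cpu(b->rm_owner) ? 1 : -1;
		if (be64_to_cpu(a->rm_offset) != be64_to_cpu(b->rm_offset))
			return be64_to_cpu(a->rm_offset) >
			       be64_to_cpu(b->rm_offset) ? 1 : -1;
		return 0;
	}
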
@@ -309,7 +306,7 @@ xfs_rmapbt_verify(
if (!xfs_verify_magic(bp, block->bb_magic))
return __this_address;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return __this_address;
fa = xfs_btree_sblock_v5hdr_verify(bp);
if (fa)
@@ -369,9 +366,9 @@ const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
STATIC int
xfs_rmapbt_keys_inorder(
- struct xfs_btree_cur *cur,
- union xfs_btree_key *k1,
- union xfs_btree_key *k2)
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *k1,
+ const union xfs_btree_key *k2)
{
uint32_t x;
uint32_t y;
@@ -399,9 +396,9 @@ xfs_rmapbt_keys_inorder(
STATIC int
xfs_rmapbt_recs_inorder(
- struct xfs_btree_cur *cur,
- union xfs_btree_rec *r1,
- union xfs_btree_rec *r2)
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_rec *r1,
+ const union xfs_btree_rec *r2)
{
uint32_t x;
uint32_t y;
@@ -448,37 +445,95 @@ static const struct xfs_btree_ops xfs_rmapbt_ops = {
.recs_inorder = xfs_rmapbt_recs_inorder,
};
-/*
- * Allocate a new allocation btree cursor.
- */
-struct xfs_btree_cur *
-xfs_rmapbt_init_cursor(
+static struct xfs_btree_cur *
+xfs_rmapbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
- struct xfs_buf *agbp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
struct xfs_btree_cur *cur;
- cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
- cur->bc_tp = tp;
- cur->bc_mp = mp;
/* Overlapping btree; 2 keys per pointer. */
- cur->bc_btnum = XFS_BTNUM_RMAP;
+ cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
+ mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
- cur->bc_blocklog = mp->m_sb.sb_blocklog;
+ cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
cur->bc_ops = &xfs_rmapbt_ops;
+
+ /* take a reference for the cursor */
+ atomic_inc(&pag->pag_ref);
+ cur->bc_ag.pag = pag;
+
+ return cur;
+}
+
+/* Create a new reverse mapping btree cursor. */
+struct xfs_btree_cur *
+xfs_rmapbt_init_cursor(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ struct xfs_perag *pag)
+{
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_btree_cur *cur;
+
+ cur = xfs_rmapbt_init_common(mp, tp, pag);
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
+ cur->bc_ag.agbp = agbp;
+ return cur;
+}
- cur->bc_private.a.agbp = agbp;
- cur->bc_private.a.agno = agno;
+/* Create a new reverse mapping btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_rmapbt_stage_cursor(
+ struct xfs_mount *mp,
+ struct xbtree_afakeroot *afake,
+ struct xfs_perag *pag)
+{
+ struct xfs_btree_cur *cur;
+ cur = xfs_rmapbt_init_common(mp, NULL, pag);
+ xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
/*
+ * Install a new reverse mapping btree root. Caller is responsible for
+ * invalidating and freeing the old btree blocks.
+ */
+void
+xfs_rmapbt_commit_staged_btree(
+ struct xfs_btree_cur *cur,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp)
+{
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xbtree_afakeroot *afake = cur->bc_ag.afake;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+ agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
+ agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
+ agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
+ xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
+ XFS_AGF_RMAP_BLOCKS);
+ xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
+}
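
Taken together, the staging entry points support the bulk-load flow used by online repair. A hedged usage sketch (error handling omitted; the bload helpers live in xfs_btree_staging.c):

	struct xbtree_afakeroot	afake = { .af_root = NULLAGBLOCK };
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_stage_cursor(mp, &afake, pag);
	/* xfs_btree_bload_compute_geometry() + xfs_btree_bload() fill out
	 * afake with the new root, level count, and block count */
	xfs_rmapbt_commit_staged_btree(cur, tp, agbp);
	xfs_btree_del_cursor(cur, 0);	/* also drops the perag reference */
	/* caller now invalidates and frees the old btree's blocks */
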
+
+/* Calculate number of records in a reverse mapping btree block. */
+static inline unsigned int
+xfs_rmapbt_block_maxrecs(
+ unsigned int blocklen,
+ bool leaf)
+{
+ if (leaf)
+ return blocklen / sizeof(struct xfs_rmap_rec);
+ return blocklen /
+ (2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
+}
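
A worked example of the geometry, assuming 4096-byte blocks, the 56-byte short-form CRC block header, and on-disk sizes of 24 bytes per xfs_rmap_rec, 20 per xfs_rmap_key, and 4 per xfs_rmap_ptr_t:

	blocklen = 4096 - 56 = 4040
	leaf maxrecs = 4040 / 24            = 168 records
	node maxrecs = 4040 / (2 * 20 + 4)  = 4040 / 44 = 91 keyptrs

Interior nodes pay for two keys per pointer because the rmapbt is an overlapping-interval btree: each pointer carries both the low and the high key of the subtree it points to.
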
+
+/*
* Calculate number of records in an rmap btree block.
*/
int
@@ -487,11 +542,33 @@ xfs_rmapbt_maxrecs(
int leaf)
{
blocklen -= XFS_RMAP_BLOCK_LEN;
+ return xfs_rmapbt_block_maxrecs(blocklen, leaf);
+}
- if (leaf)
- return blocklen / sizeof(struct xfs_rmap_rec);
- return blocklen /
- (2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
+/* Compute the max possible height for reverse mapping btrees. */
+unsigned int
+xfs_rmapbt_maxlevels_ondisk(void)
+{
+ unsigned int minrecs[2];
+ unsigned int blocklen;
+
+ blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;
+
+ minrecs[0] = xfs_rmapbt_block_maxrecs(blocklen, true) / 2;
+ minrecs[1] = xfs_rmapbt_block_maxrecs(blocklen, false) / 2;
+
+ /*
+ * Compute the asymptotic maxlevels for an rmapbt on any reflink fs.
+ *
+ * On a reflink filesystem, each AG block can have up to 2^32 (per the
+ * refcount record format) owners, which means that theoretically we
+ * could face up to 2^64 rmap records. However, we're likely to run
+ * out of blocks in the AG long before that happens, which means that
+ * we must compute the max height based on what the btree will look
+ * like if it consumes almost all the blocks in the AG due to maximal
+ * sharing factor.
+ */
+ return xfs_btree_space_to_height(minrecs, XFS_MAX_CRC_AG_BLOCKS);
}
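
xfs_btree_space_to_height() is the inverse of the usual maxlevels computation: instead of asking how tall a tree must be to index N records, it asks how tall a tree could possibly grow before exhausting a block budget, assuming every block is only minimally full (minrecs). A hedged sketch of the idea, not the kernel's exact implementation:

	static unsigned int
	space_to_height_sketch(
		const unsigned int	*minrecs,
		unsigned long long	blocks)
	{
		unsigned long long	level_blocks;
		unsigned long long	blocks_left;
		unsigned int		height = 1;

		if (blocks < 1)
			return 0;

		level_blocks = minrecs[1];	/* a minimally-full node level */
		blocks_left = blocks - 1;	/* the root consumes one block */

		/* keep adding levels while the budget still covers one */
		while (level_blocks < blocks_left) {
			blocks_left -= level_blocks;
			level_blocks *= minrecs[1];
			height++;
		}
		return height;
	}
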
/* Compute the maximum height of an rmap btree. */
@@ -499,26 +576,36 @@ void
xfs_rmapbt_compute_maxlevels(
struct xfs_mount *mp)
{
- /*
- * On a non-reflink filesystem, the maximum number of rmap
- * records is the number of blocks in the AG, hence the max
- * rmapbt height is log_$maxrecs($agblocks). However, with
- * reflink each AG block can have up to 2^32 (per the refcount
- * record format) owners, which means that theoretically we
- * could face up to 2^64 rmap records.
- *
- * That effectively means that the max rmapbt height must be
- * XFS_BTREE_MAXLEVELS. "Fortunately" we'll run out of AG
- * blocks to feed the rmapbt long before the rmapbt reaches
- * maximum height. The reflink code uses ag_resv_critical to
- * disallow reflinking when less than 10% of the per-AG metadata
- * block reservation since the fallback is a regular file copy.
- */
- if (xfs_sb_version_hasreflink(&mp->m_sb))
- mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
- else
+ if (!xfs_has_rmapbt(mp)) {
+ mp->m_rmap_maxlevels = 0;
+ return;
+ }
+
+ if (xfs_has_reflink(mp)) {
+ /*
+ * Compute the asymptotic maxlevels for an rmap btree on a
+ * filesystem that supports reflink.
+ *
+ * On a reflink filesystem, each AG block can have up to 2^32
+ * (per the refcount record format) owners, which means that
+ * theoretically we could face up to 2^64 rmap records.
+ * However, we're likely to run out of blocks in the AG long
+ * before that happens, which means that we must compute the
+ * max height based on what the btree will look like if it
+ * consumes almost all the blocks in the AG due to maximal
+ * sharing factor.
+ */
+ mp->m_rmap_maxlevels = xfs_btree_space_to_height(mp->m_rmap_mnr,
+ mp->m_sb.sb_agblocks);
+ } else {
+ /*
+ * If there's no block sharing, compute the maximum rmapbt
+ * height assuming one rmap record per AG block.
+ */
mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
+ }
+ ASSERT(mp->m_rmap_maxlevels <= xfs_rmapbt_maxlevels_ondisk());
}
/* Calculate the rmap btree size for some records. */
@@ -552,7 +639,7 @@ int
xfs_rmapbt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_extlen_t *ask,
xfs_extlen_t *used)
{
@@ -562,14 +649,14 @@ xfs_rmapbt_calc_reserves(
xfs_extlen_t tree_len;
int error;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return 0;
- error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
if (error)
return error;
- agf = XFS_BUF_TO_AGF(agbp);
+ agf = agbp->b_addr;
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_rmap_blocks);
xfs_trans_brelse(tp, agbp);
@@ -579,8 +666,7 @@ xfs_rmapbt_calc_reserves(
* never be available for the kinds of things that would require btree
* expansion. We therefore can pretend the space isn't there.
*/
- if (mp->m_sb.sb_logstart &&
- XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
+ if (xfs_ag_contains_log(mp, pag->pag_agno))
agblocks -= mp->m_sb.sb_logblocks;
/* Reserve 1% of the AG or enough for 1 block per record. */
@@ -589,3 +675,22 @@ xfs_rmapbt_calc_reserves(
return error;
}
+
+int __init
+xfs_rmapbt_init_cur_cache(void)
+{
+ xfs_rmapbt_cur_cache = kmem_cache_create("xfs_rmapbt_cur",
+ xfs_btree_cur_sizeof(xfs_rmapbt_maxlevels_ondisk()),
+ 0, 0, NULL);
+
+ if (!xfs_rmapbt_cur_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void
+xfs_rmapbt_destroy_cur_cache(void)
+{
+ kmem_cache_destroy(xfs_rmapbt_cur_cache);
+ xfs_rmapbt_cur_cache = NULL;
+}
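
A hedged sketch of the expected call sites: the cache is created once at module load next to the other btree cursor caches (modeled on xfs_btree_init_cur_caches() in xfs_btree.c), and since kmem_cache_destroy() tolerates a NULL pointer, teardown can unconditionally destroy every cache:

	error = xfs_allocbt_init_cur_cache();
	if (error)
		goto err;
	error = xfs_rmapbt_init_cur_cache();
	if (error)
		goto err;	/* err path destroys all caches */

	/* at module unload, or on the err path above: */
	xfs_rmapbt_destroy_cur_cache();
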