Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c |  6
-rw-r--r--  fs/xfs/xfs_ag.h             |  3
-rw-r--r--  fs/xfs/xfs_mount.c          | 25
3 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 81976ffed7d6..3a1d229b4784 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -150,17 +150,17 @@ xfs_inode_ag_iter_next_pag(
 		int		found;
 		int		ref;
 
-		spin_lock(&mp->m_perag_lock);
+		rcu_read_lock();
 		found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
 				(void **)&pag, *first, 1, tag);
 		if (found <= 0) {
-			spin_unlock(&mp->m_perag_lock);
+			rcu_read_unlock();
 			return NULL;
 		}
 		*first = pag->pag_agno + 1;
 		/* open coded pag reference increment */
 		ref = atomic_inc_return(&pag->pag_ref);
-		spin_unlock(&mp->m_perag_lock);
+		rcu_read_unlock();
 		trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
 	} else {
 		pag = xfs_perag_get(mp, *first);
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 4917d4eed4ed..51c42c202bf1 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -230,6 +230,9 @@ typedef struct xfs_perag {
 	rwlock_t	pag_ici_lock;	/* incore inode lock */
 	struct radix_tree_root pag_ici_root;	/* incore inode cache root */
 	int		pag_ici_reclaimable;	/* reclaimable inodes */
+
+	/* for rcu-safe freeing */
+	struct rcu_head	rcu_head;
 #endif
 	int		pagb_count;	/* pagb slots in use */
 } xfs_perag_t;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 00c7a876807d..14fc6e9e1816 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -199,6 +199,8 @@ xfs_uuid_unmount(
 
 /*
  * Reference counting access wrappers to the perag structures.
+ * Because we never free per-ag structures, the only thing we
+ * have to protect against changes is the tree structure itself.
  */
 struct xfs_perag *
 xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
@@ -206,13 +208,13 @@ xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
 	struct xfs_perag	*pag;
 	int			ref = 0;
 
-	spin_lock(&mp->m_perag_lock);
+	rcu_read_lock();
 	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
 	if (pag) {
 		ASSERT(atomic_read(&pag->pag_ref) >= 0);
 		ref = atomic_inc_return(&pag->pag_ref);
 	}
-	spin_unlock(&mp->m_perag_lock);
+	rcu_read_unlock();
 	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
 	return pag;
 }
@@ -227,10 +229,18 @@ xfs_perag_put(struct xfs_perag *pag)
 	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
 }
 
+STATIC void
+__xfs_free_perag(
+	struct rcu_head	*head)
+{
+	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
+
+	ASSERT(atomic_read(&pag->pag_ref) == 0);
+	kmem_free(pag);
+}
+
 /*
- * Free up the resources associated with a mount structure. Assume that
- * the structure was initially zeroed, so we can tell which fields got
- * initialized.
+ * Free up the per-ag resources associated with the mount structure.
  */
 STATIC void
 xfs_free_perag(
@@ -242,10 +252,9 @@ xfs_free_perag(
 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 		spin_lock(&mp->m_perag_lock);
 		pag = radix_tree_delete(&mp->m_perag_tree, agno);
-		ASSERT(pag);
-		ASSERT(atomic_read(&pag->pag_ref) == 0);
 		spin_unlock(&mp->m_perag_lock);
-		kmem_free(pag);
+		ASSERT(pag);
+		call_rcu(&pag->rcu_head, __xfs_free_perag);
 	}
 }
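
The pattern this patch applies is a common one: readers walk the radix tree under rcu_read_lock() and pin the object they find with an atomic reference count, while the teardown path deletes the tree entry under the spinlock and defers the actual free to an RCU grace period with call_rcu(). Below is a minimal standalone sketch of that pattern, not part of the commit; all demo_* names are hypothetical and kept deliberately simple (no insertion path, no error handling).

/*
 * Illustrative sketch of the lockless-lookup / RCU-deferred-free pattern:
 * lookups never take the tree lock, they only hold rcu_read_lock() across
 * the radix tree walk and take an atomic reference on the object found.
 * Removal takes the object out of the tree under the tree lock and frees
 * it only after a grace period via call_rcu().
 */
#include <linux/kernel.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_item {
	unsigned long	index;		/* key in the tree */
	atomic_t	ref;		/* active references */
	struct rcu_head	rcu_head;	/* for rcu-safe freeing */
};

static RADIX_TREE(demo_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(demo_tree_lock);	/* serialises tree modifications only */

/* Lockless lookup: RCU protects the tree walk, the refcount pins the item. */
static struct demo_item *demo_get(unsigned long index)
{
	struct demo_item *item;

	rcu_read_lock();
	item = radix_tree_lookup(&demo_tree, index);
	if (item)
		atomic_inc(&item->ref);
	rcu_read_unlock();
	return item;
}

static void demo_put(struct demo_item *item)
{
	atomic_dec(&item->ref);
}

/* RCU callback: runs after every pre-existing reader has finished. */
static void __demo_free(struct rcu_head *head)
{
	struct demo_item *item = container_of(head, struct demo_item, rcu_head);

	kfree(item);
}

/* Removal: take the item out of sight, then free it after a grace period. */
static void demo_remove(unsigned long index)
{
	struct demo_item *item;

	spin_lock(&demo_tree_lock);
	item = radix_tree_delete(&demo_tree, index);
	spin_unlock(&demo_tree_lock);
	if (item)
		call_rcu(&item->rcu_head, __demo_free);
}

The key property the patch relies on is the same as in the sketch: the per-ag structures are never freed while the filesystem is mounted, so a reader that finds one in the tree under rcu_read_lock() can safely take a reference without holding m_perag_lock, and unmount only has to make the eventual kmem_free() wait for an RCU grace period.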