path: root/sys/nfs
author: thib <thib@openbsd.org> 2009-08-14 17:52:18 +0000
committer: thib <thib@openbsd.org> 2009-08-14 17:52:18 +0000
commit: 946f48e0f6bc545edb2c0a5580aed1a07acf7b6c (patch)
tree: 157d7d59b4060ec531e65b7ffb1aead18fd032c9 /sys/nfs
parent: tweak the vmstat description; (diff)
Use the nfs_hashlock to protect the nfs_nodetree hanging off the mount.
What can happen is that a recycling of a vnode could pull one out from under us (since NFS has issues with ref counts...). Dance around getnewvnode() since we can end up recycling vnodes that were formerly owned by NFS, causing recursive locking. We were lucky with the old hashtables as the race was rare, but now with more aggressive recycling we lose, just as theo found out on vax. help from oga, beck and blambert (beck mostly screamed though). ok oga@, beck@, blambert@
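A minimal user-space C sketch of the dance described above (not the committed kernel code; pthread rwlocks stand in for the kernel rwlock, and the names node, lookup and get_node are hypothetical): drop the tree lock across the allocation, because recycling can re-enter the same subsystem and take the same lock, then relock and re-check the tree before inserting, retrying if another thread won the race.

/*
 * Sketch only: illustrates the unlock/allocate/relock/re-check pattern
 * the commit adds to nfs_nget(); names and types here are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

struct node {
	int		 key;
	struct node	*next;
};

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct node	*head;

/* Caller must hold tree_lock. */
static struct node *
lookup(int key)
{
	struct node *n;

	for (n = head; n != NULL; n = n->next)
		if (n->key == key)
			return (n);
	return (NULL);
}

struct node *
get_node(int key)
{
	struct node *n, *new;

again:
	pthread_rwlock_wrlock(&tree_lock);
	if ((n = lookup(key)) != NULL) {
		/* Already present, like the RB_FIND() hit in nfs_nget(). */
		pthread_rwlock_unlock(&tree_lock);
		return (n);
	}

	/*
	 * Drop the lock around the allocator: in the kernel case
	 * getnewvnode() may recycle a vnode and re-enter NFS via
	 * VOP_RECLAIM(), which now takes the same lock.
	 */
	pthread_rwlock_unlock(&tree_lock);
	new = calloc(1, sizeof(*new));
	pthread_rwlock_wrlock(&tree_lock);

	if (new == NULL) {
		pthread_rwlock_unlock(&tree_lock);
		return (NULL);
	}

	/* Re-check: another thread may have added the node meanwhile. */
	if (lookup(key) != NULL) {
		pthread_rwlock_unlock(&tree_lock);
		free(new);
		goto again;
	}

	new->key = key;
	new->next = head;
	head = new;
	pthread_rwlock_unlock(&tree_lock);
	return (new);
}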
Diffstat (limited to 'sys/nfs')
-rw-r--r--  sys/nfs/nfs_node.c  |  36
1 file changed, 27 insertions, 9 deletions
diff --git a/sys/nfs/nfs_node.c b/sys/nfs/nfs_node.c
index 70ccacb047d..2fcfcf3c7ed 100644
--- a/sys/nfs/nfs_node.c
+++ b/sys/nfs/nfs_node.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: nfs_node.c,v 1.47 2009/08/11 17:06:11 thib Exp $ */
+/* $OpenBSD: nfs_node.c,v 1.48 2009/08/14 17:52:18 thib Exp $ */
/* $NetBSD: nfs_node.c,v 1.16 1996/02/18 11:53:42 fvdl Exp $ */
/*
@@ -83,7 +83,7 @@ nfs_nget(struct mount *mnt, nfsfh_t *fh, int fhsize, struct nfsnode **npp)
{
extern int (**nfsv2_vnodeop_p)(void *); /* XXX */
struct nfsmount *nmp;
- struct nfsnode *np, find;
+ struct nfsnode *np, find, *np2;
struct vnode *vp, *nvp;
struct proc *p = curproc; /* XXX */
int error;
@@ -91,28 +91,43 @@ nfs_nget(struct mount *mnt, nfsfh_t *fh, int fhsize, struct nfsnode **npp)
nmp = VFSTONFS(mnt);
loop:
- /* XXXTHIB: locking. */
+ rw_enter_write(&nfs_hashlock);
find.n_fhp = fh;
find.n_fhsize = fhsize;
np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
if (np != NULL) {
+ rw_exit_write(&nfs_hashlock);
vp = NFSTOV(np);
- if (vget(vp, LK_EXCLUSIVE, p))
+ error = vget(vp, LK_EXCLUSIVE, p);
+ if (error)
goto loop;
*npp = np;
return (0);
}
- if (rw_enter(&nfs_hashlock, RW_WRITE|RW_SLEEPFAIL))
- goto loop;
-
+ /*
+ * getnewvnode() could recycle a vnode, potentially formerly
+ * owned by NFS. This will cause a VOP_RECLAIM() to happen,
+ * which will cause recursive locking, so we unlock before
+ * calling getnewvnode() and lock again afterwards, but must check
+ * to see if this nfsnode has been added while we did not hold
+ * the lock.
+ */
+ rw_exit_write(&nfs_hashlock);
error = getnewvnode(VT_NFS, mnt, nfsv2_vnodeop_p, &nvp);
+ rw_enter_write(&nfs_hashlock);
if (error) {
*npp = NULL;
- rw_exit(&nfs_hashlock);
+ rw_exit_write(&nfs_hashlock);
return (error);
}
+ np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
+ if (np != NULL) {
+ rw_exit_write(&nfs_hashlock);
+ goto loop;
+ }
+
vp = nvp;
np = pool_get(&nfs_node_pool, PR_WAITOK | PR_ZERO);
vp->v_data = np;
@@ -133,7 +148,8 @@ loop:
np->n_fhp = &np->n_fh;
bcopy(fh, np->n_fhp, fhsize);
np->n_fhsize = fhsize;
- RB_INSERT(nfs_nodetree, &nmp->nm_ntree, np);
+ np2 = RB_INSERT(nfs_nodetree, &nmp->nm_ntree, np);
+ KASSERT(np2 == NULL);
np->n_accstamp = -1;
rw_exit(&nfs_hashlock);
*npp = np;
@@ -190,7 +206,9 @@ nfs_reclaim(void *v)
vprint("nfs_reclaim: pushing active", vp);
#endif
+ rw_enter_write(&nfs_hashlock);
RB_REMOVE(nfs_nodetree, &nmp->nm_ntree, np);
+ rw_exit_write(&nfs_hashlock);
if (np->n_rcred)
crfree(np->n_rcred);